1
mirror of https://github.com/home-assistant/core synced 2024-08-02 23:40:32 +02:00
ha-core/homeassistant/core.py

1815 lines
59 KiB
Python
Raw Normal View History

"""
2015-12-28 06:14:35 +01:00
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and react to changes.
"""
2021-03-17 17:34:55 +01:00
from __future__ import annotations
import asyncio
2021-04-25 02:39:24 +02:00
from collections.abc import Awaitable, Collection, Coroutine, Iterable, Mapping
import datetime
2016-02-19 06:27:50 +01:00
import enum
import functools
2013-09-30 09:20:27 +02:00
import logging
2016-02-19 06:27:50 +01:00
import os
import pathlib
import re
import threading
from time import monotonic
2016-02-10 08:27:01 +01:00
from types import MappingProxyType
2021-04-25 02:39:24 +02:00
from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, cast
from urllib.parse import urlparse
import attr
import voluptuous as vol
import yarl
from homeassistant import block_async_io, loader, util
2016-02-19 06:27:50 +01:00
from homeassistant.const import (
2019-07-31 21:25:30 +02:00
ATTR_DOMAIN,
ATTR_FRIENDLY_NAME,
ATTR_NOW,
ATTR_SECONDS,
2019-07-31 21:25:30 +02:00
ATTR_SERVICE,
ATTR_SERVICE_DATA,
CONF_UNIT_SYSTEM_IMPERIAL,
EVENT_CALL_SERVICE,
EVENT_CORE_CONFIG_UPDATE,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_HOMEASSISTANT_FINAL_WRITE,
2019-07-31 21:25:30 +02:00
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STARTED,
2019-07-31 21:25:30 +02:00
EVENT_HOMEASSISTANT_STOP,
EVENT_SERVICE_REGISTERED,
EVENT_SERVICE_REMOVED,
2019-07-31 21:25:30 +02:00
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
EVENT_TIMER_OUT_OF_SYNC,
2020-04-12 21:44:56 +02:00
LENGTH_METERS,
2019-07-31 21:25:30 +02:00
MATCH_ALL,
MAX_LENGTH_EVENT_EVENT_TYPE,
MAX_LENGTH_STATE_STATE,
2019-07-31 21:25:30 +02:00
__version__,
)
2016-02-19 06:27:50 +01:00
from homeassistant.exceptions import (
2019-07-31 21:25:30 +02:00
HomeAssistantError,
InvalidEntityFormatError,
InvalidStateError,
MaxLengthExceeded,
2019-07-31 21:25:30 +02:00
ServiceNotFound,
Unauthorized,
2019-07-31 21:25:30 +02:00
)
2021-02-10 14:31:11 +01:00
from homeassistant.util import location
from homeassistant.util.async_ import (
fire_coroutine_threadsafe,
run_callback_threadsafe,
shutdown_run_callback_threadsafe,
)
import homeassistant.util.dt as dt_util
from homeassistant.util.timeout import TimeoutManager
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM, UnitSystem
import homeassistant.util.uuid as uuid_util
2013-09-30 09:20:27 +02:00
# Typing imports that create a circular dependency
if TYPE_CHECKING:
from homeassistant.auth import AuthManager
from homeassistant.components.http import HomeAssistantHTTP
2020-08-29 08:23:55 +02:00
from homeassistant.config_entries import ConfigEntries
STAGE_1_SHUTDOWN_TIMEOUT = 100
STAGE_2_SHUTDOWN_TIMEOUT = 60
STAGE_3_SHUTDOWN_TIMEOUT = 30
block_async_io.enable()
2019-07-31 21:25:30 +02:00
T = TypeVar("T")
_UNDEF: dict = {} # Internal; not helpers.typing.UNDEFINED due to circular dependency
# pylint: disable=invalid-name
2019-07-31 21:25:30 +02:00
CALLABLE_T = TypeVar("CALLABLE_T", bound=Callable)
CALLBACK_TYPE = Callable[[], None]
# pylint: enable=invalid-name
2019-07-31 21:25:30 +02:00
CORE_STORAGE_KEY = "core.config"
CORE_STORAGE_VERSION = 1
2019-07-31 21:25:30 +02:00
DOMAIN = "homeassistant"
# How long to wait to log tasks that are blocking
BLOCK_LOG_TIMEOUT = 60
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10 # seconds
# Source of core configuration
2019-07-31 21:25:30 +02:00
SOURCE_DISCOVERED = "discovered"
SOURCE_STORAGE = "storage"
SOURCE_YAML = "yaml"
Add support for simultaneous runs of Script helper - Part 2 (#32442) * Add limit parameter to service call methods * Break out prep part of async_call_from_config for use elsewhere * Minor cleanup * Fix improper use of asyncio.wait * Fix state update Call change listener immediately if its a callback * Fix exception handling and logging * Merge Script helper if_running/run_mode parameters into script_mode - Remove background/blocking _ScriptRun subclasses which are no longer needed. * Add queued script mode * Disable timeout when making fully blocking script call * Don't call change listener when restarting script This makes restart mode behavior consistent with parallel & queue modes. * Changes per review - Call all script services (except script.turn_off) with no time limit. - Fix handling of lock in _QueuedScriptRun and add comments to make it clearer how this code works. * Changes per review 2 - Move cancel shielding "up" from _ScriptRun.async_run to Script.async_run (and apply to new style scripts only.) This makes sure Script class also properly handles cancellation which it wasn't doing before. - In _ScriptRun._async_call_service_step, instead of using script.turn_off service, just cancel service call and let it handle the cancellation accordingly. * Fix bugs - Add missing call to change listener in Script.async_run in cancelled path. - Cancel service task if ServiceRegistry.async_call cancelled. * Revert last changes to ServiceRegistry.async_call * Minor Script helper fixes & test improvements - Don't log asyncio.CancelledError exceptions. - Make change_listener a public attribute. - Test overhaul - Parametrize tests. - Use common test functions. - Mock timeout so tests don't need to wait for real time to elapse. - Add common function for waiting for script action step.
2020-03-12 00:34:50 +01:00
# How long to wait until things that run on startup have to finish.
TIMEOUT_EVENT_START = 15
2014-11-08 22:57:08 +01:00
_LOGGER = logging.getLogger(__name__)
2013-11-11 23:58:57 +01:00
2021-03-17 17:34:55 +01:00
def split_entity_id(entity_id: str) -> list[str]:
"""Split a state entity ID into domain and object ID."""
2016-08-09 05:21:40 +02:00
return entity_id.split(".", 1)
VALID_ENTITY_ID = re.compile(r"^(?!.+__)(?!_)[\da-z_]+(?<!_)\.(?!_)[\da-z_]+(?<!_)$")
2016-08-09 05:21:40 +02:00
def valid_entity_id(entity_id: str) -> bool:
"""Test if an entity ID is a valid format.
Format: <domain>.<entity> where both are slugs.
"""
return VALID_ENTITY_ID.match(entity_id) is not None
2016-08-09 05:21:40 +02:00
def valid_state(state: str) -> bool:
2018-01-27 20:58:27 +01:00
"""Test if a state is valid."""
return len(state) <= MAX_LENGTH_STATE_STATE
def callback(func: CALLABLE_T) -> CALLABLE_T:
"""Annotation to mark method as safe to call from within the event loop."""
2019-07-31 21:25:30 +02:00
setattr(func, "_hass_callback", True)
return func
def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
2019-07-31 21:25:30 +02:00
return getattr(func, "_hass_callback", False) is True
@enum.unique
class HassJobType(enum.Enum):
"""Represent a job type."""
Coroutinefunction = 1
Callback = 2
Executor = 3
class HassJob:
"""Represent a job to be run later.
We check the callable type in advance
so we can avoid checking it every time
we run the job.
"""
__slots__ = ("job_type", "target")
def __init__(self, target: Callable) -> None:
"""Create a job object."""
if asyncio.iscoroutine(target):
raise ValueError("Coroutine not allowed to be passed to HassJob")
self.target = target
self.job_type = _get_callable_job_type(target)
def __repr__(self) -> str:
"""Return the job."""
return f"<Job {self.job_type} {self.target}>"
def _get_callable_job_type(target: Callable) -> HassJobType:
"""Determine the job type from the callable."""
# Check for partials to properly determine if coroutine function
check_target = target
while isinstance(check_target, functools.partial):
check_target = check_target.func
if asyncio.iscoroutinefunction(check_target):
return HassJobType.Coroutinefunction
if is_callback(check_target):
return HassJobType.Callback
return HassJobType.Executor
class CoreState(enum.Enum):
"""Represent the current state of Home Assistant."""
2019-07-31 21:25:30 +02:00
not_running = "NOT_RUNNING"
starting = "STARTING"
running = "RUNNING"
stopping = "STOPPING"
final_write = "FINAL_WRITE"
stopped = "STOPPED"
def __str__(self) -> str:
"""Return the event."""
return self.value
class HomeAssistant:
2015-12-28 06:14:35 +01:00
"""Root object of the Home Assistant home automation."""
2013-09-30 09:20:27 +02:00
2021-03-18 22:58:19 +01:00
auth: AuthManager
http: HomeAssistantHTTP = None # type: ignore
config_entries: ConfigEntries = None # type: ignore
def __init__(self) -> None:
2015-12-28 06:14:35 +01:00
"""Initialize new Home Assistant object."""
self.loop = asyncio.get_running_loop()
self._pending_tasks: list = []
self._track_task = True
self.bus = EventBus(self)
self.services = ServiceRegistry(self)
self.states = StateMachine(self.bus, self.loop)
self.config = Config(self)
self.components = loader.Components(self)
self.helpers = loader.Helpers(self)
# This is a dictionary that any component can store any data on.
self.data: dict = {}
self.state: CoreState = CoreState.not_running
self.exit_code: int = 0
# If not None, use to signal end-of-loop
2021-03-17 17:34:55 +01:00
self._stopped: asyncio.Event | None = None
# Timeout handler for Core/Helper namespace
self.timeout: TimeoutManager = TimeoutManager()
@property
def is_running(self) -> bool:
"""Return if Home Assistant is running."""
return self.state in (CoreState.starting, CoreState.running)
@property
def is_stopping(self) -> bool:
"""Return if Home Assistant is stopping."""
return self.state in (CoreState.stopping, CoreState.final_write)
def start(self) -> int:
"""Start Home Assistant.
Note: This function is only used for testing.
For regular use, use "await hass.run()".
"""
# Register the async start
fire_coroutine_threadsafe(self.async_start(), self.loop)
# Run forever
# Block until stopped
_LOGGER.info("Starting Home Assistant core loop")
self.loop.run_forever()
return self.exit_code
async def async_run(self, *, attach_signals: bool = True) -> int:
"""Home Assistant main entry point.
Start Home Assistant and block until stopped.
This method is a coroutine.
"""
if self.state != CoreState.not_running:
raise RuntimeError("Home Assistant is already running")
# _async_stop will set this instead of stopping the loop
self._stopped = asyncio.Event()
await self.async_start()
if attach_signals:
# pylint: disable=import-outside-toplevel
2019-07-31 21:25:30 +02:00
from homeassistant.helpers.signal import async_register_signal_handling
async_register_signal_handling(self)
await self._stopped.wait()
return self.exit_code
async def async_start(self) -> None:
"""Finalize startup from inside the event loop.
This method is a coroutine.
"""
_LOGGER.info("Starting Home Assistant")
2019-07-31 21:25:30 +02:00
setattr(self.loop, "_thread_ident", threading.get_ident())
self.state = CoreState.starting
self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)
self.bus.async_fire(EVENT_HOMEASSISTANT_START)
try:
# Only block for EVENT_HOMEASSISTANT_START listener
self.async_stop_track_tasks()
async with self.timeout.async_timeout(TIMEOUT_EVENT_START):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
2019-07-31 21:25:30 +02:00
"Something is blocking Home Assistant from wrapping up the "
"start up phase. We're going to continue anyway. Please "
"report the following info at https://github.com/home-assistant/core/issues: %s",
2019-07-31 21:25:30 +02:00
", ".join(self.config.components),
)
# Allow automations to set up the start triggers before changing state
await asyncio.sleep(0)
if self.state != CoreState.starting:
_LOGGER.warning(
2019-07-31 21:25:30 +02:00
"Home Assistant startup has been interrupted. "
"Its state may be inconsistent"
2019-07-31 21:25:30 +02:00
)
return
self.state = CoreState.running
self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)
self.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
_async_create_timer(self)
def add_job(self, target: Callable[..., Any], *args: Any) -> None:
"""Add job to the executor pool.
target: target to call.
args: parameters for method to call.
"""
2016-12-16 06:30:09 +01:00
if target is None:
raise ValueError("Don't call add_job with None")
self.loop.call_soon_threadsafe(self.async_add_job, target, *args)
@callback
def async_add_job(
2019-07-31 21:25:30 +02:00
self, target: Callable[..., Any], *args: Any
2021-03-17 17:34:55 +01:00
) -> asyncio.Future | None:
"""Add a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if target is None:
raise ValueError("Don't call async_add_job with None")
if asyncio.iscoroutine(target):
return self.async_create_task(cast(Coroutine, target))
return self.async_add_hass_job(HassJob(target), *args)
@callback
2021-03-17 17:34:55 +01:00
def async_add_hass_job(self, hassjob: HassJob, *args: Any) -> asyncio.Future | None:
"""Add a HassJob from within the event loop.
This method must be run in the event loop.
hassjob: HassJob to call.
args: parameters for method to call.
"""
if hassjob.job_type == HassJobType.Coroutinefunction:
task = self.loop.create_task(hassjob.target(*args))
elif hassjob.job_type == HassJobType.Callback:
self.loop.call_soon(hassjob.target, *args)
return None
else:
task = self.loop.run_in_executor( # type: ignore
None, hassjob.target, *args
)
# If a task is scheduled
if self._track_task:
self._pending_tasks.append(task)
return task
def create_task(self, target: Awaitable) -> None:
"""Add task to the executor pool.
target: target to call.
"""
self.loop.call_soon_threadsafe(self.async_create_task, target)
@callback
2021-07-26 16:17:15 +02:00
def async_create_task(self, target: Awaitable) -> asyncio.Task:
"""Create a task from within the eventloop.
This method must be run in the event loop.
target: target to call.
"""
2021-07-26 16:17:15 +02:00
task: asyncio.Task = self.loop.create_task(target)
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_add_executor_job(
2019-07-31 21:25:30 +02:00
self, target: Callable[..., T], *args: Any
) -> Awaitable[T]:
"""Add an executor job from within the event loop."""
2019-07-31 21:25:30 +02:00
task = self.loop.run_in_executor(None, target, *args)
# If a task is scheduled
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_track_tasks(self) -> None:
"""Track tasks so you can wait for all tasks to be done."""
self._track_task = True
@callback
def async_stop_track_tasks(self) -> None:
"""Stop track tasks so you can't wait for all tasks to be done."""
self._track_task = False
@callback
2021-03-17 17:34:55 +01:00
def async_run_hass_job(self, hassjob: HassJob, *args: Any) -> asyncio.Future | None:
"""Run a HassJob from within the event loop.
This method must be run in the event loop.
hassjob: HassJob
args: parameters for method to call.
"""
if hassjob.job_type == HassJobType.Callback:
hassjob.target(*args)
return None
return self.async_add_hass_job(hassjob, *args)
@callback
def async_run_job(
2021-03-17 17:34:55 +01:00
self, target: Callable[..., None | Awaitable], *args: Any
) -> asyncio.Future | None:
"""Run a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if asyncio.iscoroutine(target):
return self.async_create_task(cast(Coroutine, target))
return self.async_run_hass_job(HassJob(target), *args)
def block_till_done(self) -> None:
Add support for simultaneous runs of Script helper - Part 2 (#32442) * Add limit parameter to service call methods * Break out prep part of async_call_from_config for use elsewhere * Minor cleanup * Fix improper use of asyncio.wait * Fix state update Call change listener immediately if its a callback * Fix exception handling and logging * Merge Script helper if_running/run_mode parameters into script_mode - Remove background/blocking _ScriptRun subclasses which are no longer needed. * Add queued script mode * Disable timeout when making fully blocking script call * Don't call change listener when restarting script This makes restart mode behavior consistent with parallel & queue modes. * Changes per review - Call all script services (except script.turn_off) with no time limit. - Fix handling of lock in _QueuedScriptRun and add comments to make it clearer how this code works. * Changes per review 2 - Move cancel shielding "up" from _ScriptRun.async_run to Script.async_run (and apply to new style scripts only.) This makes sure Script class also properly handles cancellation which it wasn't doing before. - In _ScriptRun._async_call_service_step, instead of using script.turn_off service, just cancel service call and let it handle the cancellation accordingly. * Fix bugs - Add missing call to change listener in Script.async_run in cancelled path. - Cancel service task if ServiceRegistry.async_call cancelled. * Revert last changes to ServiceRegistry.async_call * Minor Script helper fixes & test improvements - Don't log asyncio.CancelledError exceptions. - Make change_listener a public attribute. - Test overhaul - Parametrize tests. - Use common test functions. - Mock timeout so tests don't need to wait for real time to elapse. - Add common function for waiting for script action step.
2020-03-12 00:34:50 +01:00
"""Block until all pending work is done."""
asyncio.run_coroutine_threadsafe(
self.async_block_till_done(), self.loop
).result()
async def async_block_till_done(self) -> None:
Add support for simultaneous runs of Script helper - Part 2 (#32442) * Add limit parameter to service call methods * Break out prep part of async_call_from_config for use elsewhere * Minor cleanup * Fix improper use of asyncio.wait * Fix state update Call change listener immediately if its a callback * Fix exception handling and logging * Merge Script helper if_running/run_mode parameters into script_mode - Remove background/blocking _ScriptRun subclasses which are no longer needed. * Add queued script mode * Disable timeout when making fully blocking script call * Don't call change listener when restarting script This makes restart mode behavior consistent with parallel & queue modes. * Changes per review - Call all script services (except script.turn_off) with no time limit. - Fix handling of lock in _QueuedScriptRun and add comments to make it clearer how this code works. * Changes per review 2 - Move cancel shielding "up" from _ScriptRun.async_run to Script.async_run (and apply to new style scripts only.) This makes sure Script class also properly handles cancellation which it wasn't doing before. - In _ScriptRun._async_call_service_step, instead of using script.turn_off service, just cancel service call and let it handle the cancellation accordingly. * Fix bugs - Add missing call to change listener in Script.async_run in cancelled path. - Cancel service task if ServiceRegistry.async_call cancelled. * Revert last changes to ServiceRegistry.async_call * Minor Script helper fixes & test improvements - Don't log asyncio.CancelledError exceptions. - Make change_listener a public attribute. - Test overhaul - Parametrize tests. - Use common test functions. - Mock timeout so tests don't need to wait for real time to elapse. - Add common function for waiting for script action step.
2020-03-12 00:34:50 +01:00
"""Block until all pending work is done."""
# To flush out any call_soon_threadsafe
await asyncio.sleep(0)
2021-03-17 17:34:55 +01:00
start_time: float | None = None
while self._pending_tasks:
2019-07-31 21:25:30 +02:00
pending = [task for task in self._pending_tasks if not task.done()]
self._pending_tasks.clear()
if pending:
await self._await_and_log_pending(pending)
if start_time is None:
# Avoid calling monotonic() until we know
# we may need to start logging blocked tasks.
start_time = 0
elif start_time == 0:
# If we have waited twice then we set the start
# time
start_time = monotonic()
elif monotonic() - start_time > BLOCK_LOG_TIMEOUT:
# We have waited at least three loops and new tasks
# continue to block. At this point we start
# logging all waiting tasks.
for task in pending:
_LOGGER.debug("Waiting for task: %s", task)
else:
await asyncio.sleep(0)
2014-11-23 18:51:16 +01:00
async def _await_and_log_pending(self, pending: Iterable[Awaitable[Any]]) -> None:
"""Await and log tasks that take a long time."""
wait_time = 0
while pending:
_, pending = await asyncio.wait(pending, timeout=BLOCK_LOG_TIMEOUT)
if not pending:
return
wait_time += BLOCK_LOG_TIMEOUT
for task in pending:
_LOGGER.debug("Waited %s seconds for task: %s", wait_time, task)
def stop(self) -> None:
2015-12-28 06:14:35 +01:00
"""Stop Home Assistant and shuts down all threads."""
if self.state == CoreState.not_running: # just ignore
return
fire_coroutine_threadsafe(self.async_stop(), self.loop)
2019-07-31 21:25:30 +02:00
async def async_stop(self, exit_code: int = 0, *, force: bool = False) -> None:
"""Stop Home Assistant and shuts down all threads.
The "force" flag commands async_stop to proceed regardless of
Home Assistan't current state. You should not set this flag
unless you're testing.
This method is a coroutine.
"""
if not force:
# Some tests require async_stop to run,
# regardless of the state of the loop.
if self.state == CoreState.not_running: # just ignore
return
if self.state in [CoreState.stopping, CoreState.final_write]:
_LOGGER.info("Additional call to async_stop was ignored")
return
if self.state == CoreState.starting:
# This may not work
_LOGGER.warning(
"Stopping Home Assistant before startup has completed may fail"
)
# stage 1
self.state = CoreState.stopping
self.async_track_tasks()
self.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
try:
async with self.timeout.async_timeout(STAGE_1_SHUTDOWN_TIMEOUT):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out waiting for shutdown stage 1 to complete, the shutdown will continue"
)
# stage 2
self.state = CoreState.final_write
self.bus.async_fire(EVENT_HOMEASSISTANT_FINAL_WRITE)
try:
async with self.timeout.async_timeout(STAGE_2_SHUTDOWN_TIMEOUT):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out waiting for shutdown stage 2 to complete, the shutdown will continue"
)
# stage 3
self.state = CoreState.not_running
self.bus.async_fire(EVENT_HOMEASSISTANT_CLOSE)
# Prevent run_callback_threadsafe from scheduling any additional
# callbacks in the event loop as callbacks created on the futures
# it returns will never run after the final `self.async_block_till_done`
# which will cause the futures to block forever when waiting for
# the `result()` which will cause a deadlock when shutting down the executor.
shutdown_run_callback_threadsafe(self.loop)
try:
async with self.timeout.async_timeout(STAGE_3_SHUTDOWN_TIMEOUT):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out waiting for shutdown stage 3 to complete, the shutdown will continue"
)
self.exit_code = exit_code
self.state = CoreState.stopped
if self._stopped is not None:
self._stopped.set()
2014-11-23 18:51:16 +01:00
@attr.s(slots=True, frozen=True)
class Context:
"""The context that triggered something."""
2020-07-14 19:30:30 +02:00
user_id: str = attr.ib(default=None)
2021-03-17 17:34:55 +01:00
parent_id: str | None = attr.ib(default=None)
id: str = attr.ib(factory=uuid_util.random_uuid_hex)
2021-03-17 17:34:55 +01:00
def as_dict(self) -> dict[str, str | None]:
"""Return a dictionary representation of the context."""
2019-07-31 21:25:30 +02:00
return {"id": self.id, "parent_id": self.parent_id, "user_id": self.user_id}
class EventOrigin(enum.Enum):
2016-03-08 00:06:04 +01:00
"""Represent the origin of an event."""
2019-07-31 21:25:30 +02:00
local = "LOCAL"
remote = "REMOTE"
def __str__(self) -> str:
2016-03-08 00:06:04 +01:00
"""Return the event."""
return self.value
class Event:
"""Representation of an event within the bus."""
2019-07-31 21:25:30 +02:00
__slots__ = ["event_type", "data", "origin", "time_fired", "context"]
2019-07-31 21:25:30 +02:00
def __init__(
self,
event_type: str,
2021-03-17 17:34:55 +01:00
data: dict[str, Any] | None = None,
2019-07-31 21:25:30 +02:00
origin: EventOrigin = EventOrigin.local,
2021-03-17 17:34:55 +01:00
time_fired: datetime.datetime | None = None,
context: Context | None = None,
2019-07-31 21:25:30 +02:00
) -> None:
2015-12-28 06:14:35 +01:00
"""Initialize a new event."""
self.event_type = event_type
self.data = data or {}
self.origin = origin
2016-04-16 09:55:35 +02:00
self.time_fired = time_fired or dt_util.utcnow()
self.context: Context = context or Context()
def __hash__(self) -> int:
"""Make hashable."""
# The only event type that shares context are the TIME_CHANGED
return hash((self.event_type, self.context.id, self.time_fired))
2021-03-17 17:34:55 +01:00
def as_dict(self) -> dict[str, Any]:
"""Create a dict representation of this Event.
Async friendly.
"""
2015-01-28 09:22:09 +01:00
return {
2019-07-31 21:25:30 +02:00
"event_type": self.event_type,
"data": dict(self.data),
"origin": str(self.origin.value),
"time_fired": self.time_fired.isoformat(),
2019-07-31 21:25:30 +02:00
"context": self.context.as_dict(),
2015-01-28 09:22:09 +01:00
}
def __repr__(self) -> str:
2016-03-08 00:06:04 +01:00
"""Return the representation."""
if self.data:
return f"<Event {self.event_type}[{str(self.origin)[0]}]: {util.repr_helper(self.data)}>"
return f"<Event {self.event_type}[{str(self.origin)[0]}]>"
2013-11-11 01:46:48 +01:00
def __eq__(self, other: Any) -> bool:
2016-03-08 00:06:04 +01:00
"""Return the comparison."""
2019-07-31 22:08:31 +02:00
return ( # type: ignore
2019-07-31 21:25:30 +02:00
self.__class__ == other.__class__
2019-07-31 22:08:31 +02:00
and self.event_type == other.event_type
2019-07-31 21:25:30 +02:00
and self.data == other.data
and self.origin == other.origin
and self.time_fired == other.time_fired
and self.context == other.context
)
2015-04-30 08:21:31 +02:00
2013-10-09 03:50:30 +02:00
class EventBus:
"""Allow the firing of and listening for events."""
2013-09-30 09:20:27 +02:00
def __init__(self, hass: HomeAssistant) -> None:
2015-12-28 06:14:35 +01:00
"""Initialize a new event bus."""
2021-03-17 17:34:55 +01:00
self._listeners: dict[str, list[tuple[HassJob, Callable | None]]] = {}
self._hass = hass
@callback
2021-03-17 17:34:55 +01:00
def async_listeners(self) -> dict[str, int]:
"""Return dictionary with events and the number of listeners.
This method must be run in the event loop.
"""
return {key: len(listeners) for key, listeners in self._listeners.items()}
@property
2021-03-17 17:34:55 +01:00
def listeners(self) -> dict[str, int]:
"""Return dictionary with events and the number of listeners."""
2020-04-17 20:33:58 +02:00
return run_callback_threadsafe(self._hass.loop, self.async_listeners).result()
2014-01-30 07:48:35 +01:00
2019-07-31 21:25:30 +02:00
def fire(
self,
event_type: str,
2021-03-17 17:34:55 +01:00
event_data: dict | None = None,
2019-07-31 21:25:30 +02:00
origin: EventOrigin = EventOrigin.local,
2021-03-17 17:34:55 +01:00
context: Context | None = None,
2019-07-31 21:25:30 +02:00
) -> None:
2015-12-28 06:14:35 +01:00
"""Fire an event."""
self._hass.loop.call_soon_threadsafe(
2019-07-31 21:25:30 +02:00
self.async_fire, event_type, event_data, origin, context
)
2013-09-30 09:20:27 +02:00
@callback
2019-07-31 21:25:30 +02:00
def async_fire(
self,
event_type: str,
2021-03-17 17:34:55 +01:00
event_data: dict[str, Any] | None = None,
2019-07-31 21:25:30 +02:00
origin: EventOrigin = EventOrigin.local,
2021-03-17 17:34:55 +01:00
context: Context | None = None,
time_fired: datetime.datetime | None = None,
2019-07-31 21:25:30 +02:00
) -> None:
"""Fire an event.
This method must be run in the event loop.
"""
if len(event_type) > MAX_LENGTH_EVENT_EVENT_TYPE:
raise MaxLengthExceeded(
event_type, "event_type", MAX_LENGTH_EVENT_EVENT_TYPE
)
listeners = self._listeners.get(event_type, [])
# EVENT_HOMEASSISTANT_CLOSE should go only to his listeners
match_all_listeners = self._listeners.get(MATCH_ALL)
2019-07-31 21:25:30 +02:00
if match_all_listeners is not None and event_type != EVENT_HOMEASSISTANT_CLOSE:
listeners = match_all_listeners + listeners
event = Event(event_type, event_data, origin, time_fired, context)
if event_type != EVENT_TIME_CHANGED:
2018-09-18 15:06:52 +02:00
_LOGGER.debug("Bus:Handling %s", event)
if not listeners:
return
for job, event_filter in listeners:
if event_filter is not None:
try:
if not event_filter(event):
continue
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error in event filter")
continue
self._hass.async_add_hass_job(job, event)
2019-07-31 21:25:30 +02:00
def listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
2015-12-28 06:14:35 +01:00
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
"""
async_remove_listener = run_callback_threadsafe(
2019-07-31 21:25:30 +02:00
self._hass.loop, self.async_listen, event_type, listener
).result()
def remove_listener() -> None:
2016-08-26 08:25:35 +02:00
"""Remove the listener."""
2019-07-31 21:25:30 +02:00
run_callback_threadsafe(self._hass.loop, async_remove_listener).result()
2016-08-26 08:25:35 +02:00
return remove_listener
@callback
def async_listen(
self,
event_type: str,
listener: Callable,
2021-03-17 17:34:55 +01:00
event_filter: Callable | None = None,
) -> CALLBACK_TYPE:
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
An optional event_filter, which must be a callable decorated with
@callback that returns a boolean value, determines if the
listener callable should run.
This method must be run in the event loop.
"""
if event_filter is not None and not is_callback(event_filter):
raise HomeAssistantError(f"Event filter {event_filter} is not a callback")
return self._async_listen_filterable_job(
event_type, (HassJob(listener), event_filter)
)
@callback
def _async_listen_filterable_job(
2021-03-17 17:34:55 +01:00
self, event_type: str, filterable_job: tuple[HassJob, Callable | None]
) -> CALLBACK_TYPE:
self._listeners.setdefault(event_type, []).append(filterable_job)
def remove_listener() -> None:
"""Remove the listener."""
self._async_remove_listener(event_type, filterable_job)
return remove_listener
2021-04-17 08:35:21 +02:00
def listen_once(
self, event_type: str, listener: Callable[[Event], None]
) -> CALLBACK_TYPE:
2015-12-28 06:14:35 +01:00
"""Listen once for event of a specific type.
2014-11-29 08:19:59 +01:00
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
2016-09-07 15:59:59 +02:00
Returns function to unsubscribe the listener.
2014-11-29 08:19:59 +01:00
"""
async_remove_listener = run_callback_threadsafe(
2019-07-31 21:25:30 +02:00
self._hass.loop, self.async_listen_once, event_type, listener
).result()
2014-11-29 08:19:59 +01:00
def remove_listener() -> None:
"""Remove the listener."""
2019-07-31 21:25:30 +02:00
run_callback_threadsafe(self._hass.loop, async_remove_listener).result()
2014-11-29 08:19:59 +01:00
2016-09-07 15:59:59 +02:00
return remove_listener
@callback
2019-07-31 21:25:30 +02:00
def async_listen_once(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns registered listener that can be used with remove_listener.
This method must be run in the event loop.
"""
2021-03-17 17:34:55 +01:00
filterable_job: tuple[HassJob, Callable | None] | None = None
2019-07-31 21:25:30 +02:00
@callback
def _onetime_listener(event: Event) -> None:
"""Remove listener from event bus and then fire listener."""
nonlocal filterable_job
if hasattr(_onetime_listener, "run"):
return
# Set variable so that we will never run twice.
# Because the event bus loop might have async_fire queued multiple
# times, its possible this listener may already be lined up
# multiple times as well.
# This will make sure the second time it does nothing.
setattr(_onetime_listener, "run", True)
assert filterable_job is not None
self._async_remove_listener(event_type, filterable_job)
self._hass.async_run_job(listener, event)
filterable_job = (HassJob(_onetime_listener), None)
return self._async_listen_filterable_job(event_type, filterable_job)
2016-09-07 15:59:59 +02:00
@callback
def _async_remove_listener(
2021-03-17 17:34:55 +01:00
self, event_type: str, filterable_job: tuple[HassJob, Callable | None]
) -> None:
"""Remove a listener of a specific event_type.
This method must be run in the event loop.
"""
try:
self._listeners[event_type].remove(filterable_job)
2013-10-24 01:29:33 +02:00
# delete event_type list if empty
if not self._listeners[event_type]:
self._listeners.pop(event_type)
except (KeyError, ValueError):
# KeyError is key event_type listener did not exist
# ValueError if listener did not exist within event_type
_LOGGER.exception(
"Unable to remove unknown job listener %s", filterable_job
)
2013-09-30 09:20:27 +02:00
2013-11-11 01:46:48 +01:00
class State:
2016-03-08 00:06:04 +01:00
"""Object to represent a state within the state machine.
entity_id: the entity that is represented.
state: the state of the entity
attributes: extra information on entity and state
last_changed: last time the state was changed, not the attributes.
last_updated: last time this object was updated.
context: Context in which it was created
domain: Domain of this state.
object_id: Object id of this state.
"""
2019-07-31 21:25:30 +02:00
__slots__ = [
"entity_id",
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"_as_dict",
2019-07-31 21:25:30 +02:00
]
def __init__(
self,
entity_id: str,
state: str,
2021-03-17 17:34:55 +01:00
attributes: Mapping[str, Any] | None = None,
last_changed: datetime.datetime | None = None,
last_updated: datetime.datetime | None = None,
context: Context | None = None,
validate_entity_id: bool | None = True,
2019-07-31 21:25:30 +02:00
) -> None:
2015-12-28 06:14:35 +01:00
"""Initialize a new state."""
state = str(state)
if validate_entity_id and not valid_entity_id(entity_id):
2019-07-31 21:25:30 +02:00
raise InvalidEntityFormatError(
f"Invalid entity id encountered: {entity_id}. "
"Format should be <domain>.<object_id>"
2019-07-31 21:25:30 +02:00
)
if not valid_state(state):
2019-07-31 21:25:30 +02:00
raise InvalidStateError(
f"Invalid state encountered for entity ID: {entity_id}. "
"State max length is 255 characters."
2019-07-31 21:25:30 +02:00
)
self.entity_id = entity_id.lower()
self.state = state
2016-02-10 08:27:01 +01:00
self.attributes = MappingProxyType(attributes or {})
2016-04-16 09:55:35 +02:00
self.last_updated = last_updated or dt_util.utcnow()
self.last_changed = last_changed or self.last_updated
self.context = context or Context()
self.domain, self.object_id = split_entity_id(self.entity_id)
2021-03-17 17:34:55 +01:00
self._as_dict: dict[str, Collection[Any]] | None = None
2015-03-29 23:39:47 +02:00
@property
def name(self) -> str:
2015-12-28 06:14:35 +01:00
"""Name of this state."""
2019-07-31 21:25:30 +02:00
return self.attributes.get(ATTR_FRIENDLY_NAME) or self.object_id.replace(
"_", " "
)
2015-03-29 23:39:47 +02:00
2021-03-17 17:34:55 +01:00
def as_dict(self) -> dict:
2015-12-28 06:14:35 +01:00
"""Return a dict representation of the State.
Async friendly.
2015-12-28 06:14:35 +01:00
To be used for JSON serialization.
Ensures: state == State.from_dict(state.as_dict())
"""
if not self._as_dict:
last_changed_isoformat = self.last_changed.isoformat()
if self.last_changed == self.last_updated:
last_updated_isoformat = last_changed_isoformat
else:
last_updated_isoformat = self.last_updated.isoformat()
self._as_dict = {
"entity_id": self.entity_id,
"state": self.state,
"attributes": dict(self.attributes),
"last_changed": last_changed_isoformat,
"last_updated": last_updated_isoformat,
"context": self.context.as_dict(),
}
return self._as_dict
2014-04-15 08:48:00 +02:00
@classmethod
2021-03-17 17:34:55 +01:00
def from_dict(cls, json_dict: dict) -> Any:
2015-12-28 06:14:35 +01:00
"""Initialize a state from a dict.
Async friendly.
2015-12-28 06:14:35 +01:00
Ensures: state == State.from_json_dict(state.to_json_dict())
"""
2019-07-31 21:25:30 +02:00
if not (json_dict and "entity_id" in json_dict and "state" in json_dict):
2014-04-15 08:48:00 +02:00
return None
2019-07-31 21:25:30 +02:00
last_changed = json_dict.get("last_changed")
2016-04-16 09:55:35 +02:00
if isinstance(last_changed, str):
last_changed = dt_util.parse_datetime(last_changed)
2014-04-15 08:48:00 +02:00
2019-07-31 21:25:30 +02:00
last_updated = json_dict.get("last_updated")
2015-04-01 08:08:38 +02:00
2016-04-16 09:55:35 +02:00
if isinstance(last_updated, str):
last_updated = dt_util.parse_datetime(last_updated)
2015-04-01 08:08:38 +02:00
2019-07-31 21:25:30 +02:00
context = json_dict.get("context")
if context:
2019-07-31 21:25:30 +02:00
context = Context(id=context.get("id"), user_id=context.get("user_id"))
return cls(
json_dict["entity_id"],
json_dict["state"],
json_dict.get("attributes"),
last_changed,
last_updated,
context,
)
def __eq__(self, other: Any) -> bool:
2016-03-08 00:06:04 +01:00
"""Return the comparison of the state."""
2019-07-31 22:08:31 +02:00
return ( # type: ignore
2019-07-31 21:25:30 +02:00
self.__class__ == other.__class__
2019-07-31 22:08:31 +02:00
and self.entity_id == other.entity_id
2019-07-31 21:25:30 +02:00
and self.state == other.state
and self.attributes == other.attributes
and self.context == other.context
)
def __repr__(self) -> str:
2016-03-08 00:06:04 +01:00
"""Return the representation of the states."""
attrs = f"; {util.repr_helper(self.attributes)}" if self.attributes else ""
return (
f"<state {self.entity_id}={self.state}{attrs}"
f" @ {dt_util.as_local(self.last_changed).isoformat()}>"
2019-07-31 21:25:30 +02:00
)
class StateMachine:
2015-12-28 06:14:35 +01:00
"""Helper class that tracks the state of different entities."""
2013-09-30 09:20:27 +02:00
2019-07-31 21:25:30 +02:00
def __init__(self, bus: EventBus, loop: asyncio.events.AbstractEventLoop) -> None:
2015-12-28 06:14:35 +01:00
"""Initialize state machine."""
2021-03-17 17:34:55 +01:00
self._states: dict[str, State] = {}
self._reservations: set[str] = set()
self._bus = bus
self._loop = loop
2013-09-30 09:20:27 +02:00
2021-03-17 17:34:55 +01:00
def entity_ids(self, domain_filter: str | None = None) -> list[str]:
"""List of entity ids that are being tracked."""
future = run_callback_threadsafe(
self._loop, self.async_entity_ids, domain_filter
)
2020-04-17 20:33:58 +02:00
return future.result()
@callback
def async_entity_ids(
2021-03-17 17:34:55 +01:00
self, domain_filter: str | Iterable | None = None
) -> list[str]:
"""List of entity ids that are being tracked.
This method must be run in the event loop.
"""
if domain_filter is None:
return list(self._states)
2014-04-15 08:48:00 +02:00
if isinstance(domain_filter, str):
domain_filter = (domain_filter.lower(),)
2019-07-31 21:25:30 +02:00
return [
state.entity_id
for state in self._states.values()
if state.domain in domain_filter
2019-07-31 21:25:30 +02:00
]
@callback
def async_entity_ids_count(
2021-03-17 17:34:55 +01:00
self, domain_filter: str | Iterable | None = None
) -> int:
"""Count the entity ids that are being tracked.
This method must be run in the event loop.
"""
if domain_filter is None:
return len(self._states)
if isinstance(domain_filter, str):
domain_filter = (domain_filter.lower(),)
return len(
[None for state in self._states.values() if state.domain in domain_filter]
)
2021-03-17 17:34:55 +01:00
def all(self, domain_filter: str | Iterable | None = None) -> list[State]:
2015-12-28 06:14:35 +01:00
"""Create a list of all states."""
return run_callback_threadsafe(
self._loop, self.async_all, domain_filter
).result()
@callback
2021-03-17 17:34:55 +01:00
def async_all(self, domain_filter: str | Iterable | None = None) -> list[State]:
"""Create a list of all states matching the filter.
This method must be run in the event loop.
"""
if domain_filter is None:
return list(self._states.values())
if isinstance(domain_filter, str):
domain_filter = (domain_filter.lower(),)
return [
state for state in self._states.values() if state.domain in domain_filter
]
2021-03-17 17:34:55 +01:00
def get(self, entity_id: str) -> State | None:
"""Retrieve state of entity_id or None if not found.
Async friendly.
"""
2016-02-10 08:27:01 +01:00
return self._states.get(entity_id.lower())
2014-04-15 08:48:00 +02:00
def is_state(self, entity_id: str, state: str) -> bool:
"""Test if entity exists and is in specified state.
Async friendly.
"""
state_obj = self.get(entity_id)
return state_obj is not None and state_obj.state == state
2013-10-24 01:08:28 +02:00
def remove(self, entity_id: str) -> bool:
2015-12-28 06:14:35 +01:00
"""Remove the state of an entity.
2015-12-28 06:14:35 +01:00
Returns boolean to indicate if an entity was removed.
"""
2020-04-17 20:33:58 +02:00
return run_callback_threadsafe(
2019-07-31 21:25:30 +02:00
self._loop, self.async_remove, entity_id
).result()
@callback
2021-03-17 17:34:55 +01:00
def async_remove(self, entity_id: str, context: Context | None = None) -> bool:
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
This method must be run in the event loop.
"""
2015-02-06 09:17:30 +01:00
entity_id = entity_id.lower()
old_state = self._states.pop(entity_id, None)
2016-02-14 07:57:40 +01:00
if entity_id in self._reservations:
self._reservations.remove(entity_id)
if old_state is None:
return False
2016-02-14 07:57:40 +01:00
2019-07-31 21:25:30 +02:00
self._bus.async_fire(
EVENT_STATE_CHANGED,
{"entity_id": entity_id, "old_state": old_state, "new_state": None},
EventOrigin.local,
context=context,
2019-07-31 21:25:30 +02:00
)
return True
2019-07-31 21:25:30 +02:00
def set(
self,
entity_id: str,
new_state: str,
2021-03-17 17:34:55 +01:00
attributes: Mapping[str, Any] | None = None,
2019-07-31 21:25:30 +02:00
force_update: bool = False,
2021-03-17 17:34:55 +01:00
context: Context | None = None,
2019-07-31 21:25:30 +02:00
) -> None:
2015-12-28 06:14:35 +01:00
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
"""
run_callback_threadsafe(
self._loop,
2019-07-31 21:25:30 +02:00
self.async_set,
entity_id,
new_state,
attributes,
force_update,
context,
).result()
@callback
def async_reserve(self, entity_id: str) -> None:
"""Reserve a state in the state machine for an entity being added.
This must not fire an event when the state is reserved.
This avoids a race condition where multiple entities with the same
entity_id are added.
"""
entity_id = entity_id.lower()
if entity_id in self._states or entity_id in self._reservations:
raise HomeAssistantError(
"async_reserve must not be called once the state is in the state machine."
)
self._reservations.add(entity_id)
@callback
def async_available(self, entity_id: str) -> bool:
"""Check to see if an entity_id is available to be used."""
entity_id = entity_id.lower()
return entity_id not in self._states and entity_id not in self._reservations
@callback
2019-07-31 21:25:30 +02:00
def async_set(
self,
entity_id: str,
new_state: str,
2021-03-17 17:34:55 +01:00
attributes: Mapping[str, Any] | None = None,
2019-07-31 21:25:30 +02:00
force_update: bool = False,
2021-03-17 17:34:55 +01:00
context: Context | None = None,
2019-07-31 21:25:30 +02:00
) -> None:
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
This method must be run in the event loop.
"""
2015-02-06 09:17:30 +01:00
entity_id = entity_id.lower()
2015-01-13 08:31:31 +01:00
new_state = str(new_state)
attributes = attributes or {}
old_state = self._states.get(entity_id)
if old_state is None:
same_state = False
same_attr = False
last_changed = None
else:
2019-07-31 21:25:30 +02:00
same_state = old_state.state == new_state and not force_update
same_attr = old_state.attributes == MappingProxyType(attributes)
last_changed = old_state.last_changed if same_state else None
if same_state and same_attr:
return
if context is None:
context = Context()
now = dt_util.utcnow()
state = State(
entity_id,
new_state,
attributes,
last_changed,
now,
context,
old_state is None,
)
self._states[entity_id] = state
2019-07-31 21:25:30 +02:00
self._bus.async_fire(
EVENT_STATE_CHANGED,
{"entity_id": entity_id, "old_state": old_state, "new_state": state},
EventOrigin.local,
context,
time_fired=now,
2019-07-31 21:25:30 +02:00
)
class Service:
"""Representation of a callable service."""
2015-09-27 08:17:04 +02:00
__slots__ = ["job", "schema"]
2015-09-27 08:17:04 +02:00
2019-07-31 21:25:30 +02:00
def __init__(
self,
func: Callable,
2021-03-17 17:34:55 +01:00
schema: vol.Schema | None,
context: Context | None = None,
2019-07-31 21:25:30 +02:00
) -> None:
2015-12-28 06:14:35 +01:00
"""Initialize a service."""
self.job = HassJob(func)
self.schema = schema
2015-09-27 08:17:04 +02:00
class ServiceCall:
"""Representation of a call to a service."""
2019-07-31 21:25:30 +02:00
__slots__ = ["domain", "service", "data", "context"]
2019-07-31 21:25:30 +02:00
def __init__(
self,
domain: str,
service: str,
2021-03-17 17:34:55 +01:00
data: dict | None = None,
context: Context | None = None,
2019-07-31 21:25:30 +02:00
) -> None:
2015-12-28 06:14:35 +01:00
"""Initialize a service call."""
self.domain = domain.lower()
self.service = service.lower()
self.data = MappingProxyType(data or {})
self.context = context or Context()
def __repr__(self) -> str:
2018-01-27 20:58:27 +01:00
"""Return the representation of the service."""
if self.data:
return (
f"<ServiceCall {self.domain}.{self.service} "
f"(c:{self.context.id}): {util.repr_helper(self.data)}>"
2019-07-31 21:25:30 +02:00
)
return f"<ServiceCall {self.domain}.{self.service} (c:{self.context.id})>"
class ServiceRegistry:
"""Offer the services over the eventbus."""
def __init__(self, hass: HomeAssistant) -> None:
2015-12-28 06:14:35 +01:00
"""Initialize a service registry."""
2021-03-17 17:34:55 +01:00
self._services: dict[str, dict[str, Service]] = {}
self._hass = hass
@property
2021-03-17 17:34:55 +01:00
def services(self) -> dict[str, dict[str, Service]]:
"""Return dictionary with per domain a list of available services."""
2020-04-17 20:33:58 +02:00
return run_callback_threadsafe(self._hass.loop, self.async_services).result()
@callback
2021-03-17 17:34:55 +01:00
def async_services(self) -> dict[str, dict[str, Service]]:
"""Return dictionary with per domain a list of available services.
This method must be run in the event loop.
"""
return {domain: service.copy() for domain, service in self._services.items()}
def has_service(self, domain: str, service: str) -> bool:
"""Test if specified service exists.
Async friendly.
"""
return service.lower() in self._services.get(domain.lower(), [])
2019-07-31 21:25:30 +02:00
def register(
self,
domain: str,
service: str,
service_func: Callable,
2021-03-17 17:34:55 +01:00
schema: vol.Schema | None = None,
2019-07-31 21:25:30 +02:00
) -> None:
2015-09-27 08:17:04 +02:00
"""
Register a service.
Schema is called to coerce and validate the service data.
2015-09-27 08:17:04 +02:00
"""
run_callback_threadsafe(
2019-07-31 21:25:30 +02:00
self._hass.loop, self.async_register, domain, service, service_func, schema
).result()
@callback
2019-07-31 21:25:30 +02:00
def async_register(
self,
domain: str,
service: str,
service_func: Callable,
2021-03-17 17:34:55 +01:00
schema: vol.Schema | None = None,
2019-07-31 21:25:30 +02:00
) -> None:
"""
Register a service.
Schema is called to coerce and validate the service data.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
service_obj = Service(service_func, schema)
if domain in self._services:
self._services[domain][service] = service_obj
else:
self._services[domain] = {service: service_obj}
self._hass.bus.async_fire(
2019-07-31 21:25:30 +02:00
EVENT_SERVICE_REGISTERED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def remove(self, domain: str, service: str) -> None:
"""Remove a registered service from service handler."""
run_callback_threadsafe(
2019-07-31 21:25:30 +02:00
self._hass.loop, self.async_remove, domain, service
).result()
@callback
def async_remove(self, domain: str, service: str) -> None:
"""Remove a registered service from service handler.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
if service not in self._services.get(domain, {}):
_LOGGER.warning("Unable to remove unknown service %s/%s", domain, service)
return
self._services[domain].pop(service)
if not self._services[domain]:
self._services.pop(domain)
self._hass.bus.async_fire(
2019-07-31 21:25:30 +02:00
EVENT_SERVICE_REMOVED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
2019-07-31 21:25:30 +02:00
def call(
self,
domain: str,
service: str,
2021-03-17 17:34:55 +01:00
service_data: dict | None = None,
2019-07-31 21:25:30 +02:00
blocking: bool = False,
2021-03-17 17:34:55 +01:00
context: Context | None = None,
limit: float | None = SERVICE_CALL_LIMIT,
target: dict | None = None,
) -> bool | None:
"""
2015-12-28 06:14:35 +01:00
Call a service.
Add support for simultaneous runs of Script helper - Part 2 (#32442) * Add limit parameter to service call methods * Break out prep part of async_call_from_config for use elsewhere * Minor cleanup * Fix improper use of asyncio.wait * Fix state update Call change listener immediately if its a callback * Fix exception handling and logging * Merge Script helper if_running/run_mode parameters into script_mode - Remove background/blocking _ScriptRun subclasses which are no longer needed. * Add queued script mode * Disable timeout when making fully blocking script call * Don't call change listener when restarting script This makes restart mode behavior consistent with parallel & queue modes. * Changes per review - Call all script services (except script.turn_off) with no time limit. - Fix handling of lock in _QueuedScriptRun and add comments to make it clearer how this code works. * Changes per review 2 - Move cancel shielding "up" from _ScriptRun.async_run to Script.async_run (and apply to new style scripts only.) This makes sure Script class also properly handles cancellation which it wasn't doing before. - In _ScriptRun._async_call_service_step, instead of using script.turn_off service, just cancel service call and let it handle the cancellation accordingly. * Fix bugs - Add missing call to change listener in Script.async_run in cancelled path. - Cancel service task if ServiceRegistry.async_call cancelled. * Revert last changes to ServiceRegistry.async_call * Minor Script helper fixes & test improvements - Don't log asyncio.CancelledError exceptions. - Make change_listener a public attribute. - Test overhaul - Parametrize tests. - Use common test functions. - Mock timeout so tests don't need to wait for real time to elapse. - Add common function for waiting for script action step.
2020-03-12 00:34:50 +01:00
See description of async_call for details.
"""
return asyncio.run_coroutine_threadsafe(
self.async_call(
domain, service, service_data, blocking, context, limit, target
),
2019-07-31 21:25:30 +02:00
self._hass.loop,
).result()
2019-07-31 21:25:30 +02:00
async def async_call(
self,
domain: str,
service: str,
2021-03-17 17:34:55 +01:00
service_data: dict | None = None,
2019-07-31 21:25:30 +02:00
blocking: bool = False,
2021-03-17 17:34:55 +01:00
context: Context | None = None,
limit: float | None = SERVICE_CALL_LIMIT,
target: dict | None = None,
) -> bool | None:
"""
Call a service.
Add support for simultaneous runs of Script helper - Part 2 (#32442) * Add limit parameter to service call methods * Break out prep part of async_call_from_config for use elsewhere * Minor cleanup * Fix improper use of asyncio.wait * Fix state update Call change listener immediately if its a callback * Fix exception handling and logging * Merge Script helper if_running/run_mode parameters into script_mode - Remove background/blocking _ScriptRun subclasses which are no longer needed. * Add queued script mode * Disable timeout when making fully blocking script call * Don't call change listener when restarting script This makes restart mode behavior consistent with parallel & queue modes. * Changes per review - Call all script services (except script.turn_off) with no time limit. - Fix handling of lock in _QueuedScriptRun and add comments to make it clearer how this code works. * Changes per review 2 - Move cancel shielding "up" from _ScriptRun.async_run to Script.async_run (and apply to new style scripts only.) This makes sure Script class also properly handles cancellation which it wasn't doing before. - In _ScriptRun._async_call_service_step, instead of using script.turn_off service, just cancel service call and let it handle the cancellation accordingly. * Fix bugs - Add missing call to change listener in Script.async_run in cancelled path. - Cancel service task if ServiceRegistry.async_call cancelled. * Revert last changes to ServiceRegistry.async_call * Minor Script helper fixes & test improvements - Don't log asyncio.CancelledError exceptions. - Make change_listener a public attribute. - Test overhaul - Parametrize tests. - Use common test functions. - Mock timeout so tests don't need to wait for real time to elapse. - Add common function for waiting for script action step.
2020-03-12 00:34:50 +01:00
Specify blocking=True to wait until service is executed.
Waits a maximum of limit, which may be None for no timeout.
If blocking = True, will return boolean if service executed
Add support for simultaneous runs of Script helper - Part 2 (#32442) * Add limit parameter to service call methods * Break out prep part of async_call_from_config for use elsewhere * Minor cleanup * Fix improper use of asyncio.wait * Fix state update Call change listener immediately if its a callback * Fix exception handling and logging * Merge Script helper if_running/run_mode parameters into script_mode - Remove background/blocking _ScriptRun subclasses which are no longer needed. * Add queued script mode * Disable timeout when making fully blocking script call * Don't call change listener when restarting script This makes restart mode behavior consistent with parallel & queue modes. * Changes per review - Call all script services (except script.turn_off) with no time limit. - Fix handling of lock in _QueuedScriptRun and add comments to make it clearer how this code works. * Changes per review 2 - Move cancel shielding "up" from _ScriptRun.async_run to Script.async_run (and apply to new style scripts only.) This makes sure Script class also properly handles cancellation which it wasn't doing before. - In _ScriptRun._async_call_service_step, instead of using script.turn_off service, just cancel service call and let it handle the cancellation accordingly. * Fix bugs - Add missing call to change listener in Script.async_run in cancelled path. - Cancel service task if ServiceRegistry.async_call cancelled. * Revert last changes to ServiceRegistry.async_call * Minor Script helper fixes & test improvements - Don't log asyncio.CancelledError exceptions. - Make change_listener a public attribute. - Test overhaul - Parametrize tests. - Use common test functions. - Mock timeout so tests don't need to wait for real time to elapse. - Add common function for waiting for script action step.
2020-03-12 00:34:50 +01:00
successfully within limit.
Add support for simultaneous runs of Script helper - Part 2 (#32442) * Add limit parameter to service call methods * Break out prep part of async_call_from_config for use elsewhere * Minor cleanup * Fix improper use of asyncio.wait * Fix state update Call change listener immediately if its a callback * Fix exception handling and logging * Merge Script helper if_running/run_mode parameters into script_mode - Remove background/blocking _ScriptRun subclasses which are no longer needed. * Add queued script mode * Disable timeout when making fully blocking script call * Don't call change listener when restarting script This makes restart mode behavior consistent with parallel & queue modes. * Changes per review - Call all script services (except script.turn_off) with no time limit. - Fix handling of lock in _QueuedScriptRun and add comments to make it clearer how this code works. * Changes per review 2 - Move cancel shielding "up" from _ScriptRun.async_run to Script.async_run (and apply to new style scripts only.) This makes sure Script class also properly handles cancellation which it wasn't doing before. - In _ScriptRun._async_call_service_step, instead of using script.turn_off service, just cancel service call and let it handle the cancellation accordingly. * Fix bugs - Add missing call to change listener in Script.async_run in cancelled path. - Cancel service task if ServiceRegistry.async_call cancelled. * Revert last changes to ServiceRegistry.async_call * Minor Script helper fixes & test improvements - Don't log asyncio.CancelledError exceptions. - Make change_listener a public attribute. - Test overhaul - Parametrize tests. - Use common test functions. - Mock timeout so tests don't need to wait for real time to elapse. - Add common function for waiting for script action step.
2020-03-12 00:34:50 +01:00
This method will fire an event to indicate the service has been called.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
This method is a coroutine.
"""
domain = domain.lower()
service = service.lower()
context = context or Context()
service_data = service_data or {}
try:
handler = self._services[domain][service]
except KeyError:
raise ServiceNotFound(domain, service) from None
if target:
service_data.update(target)
if handler.schema:
try:
processed_data = handler.schema(service_data)
except vol.Invalid:
_LOGGER.debug(
"Invalid data for service call %s.%s: %s",
domain,
service,
service_data,
)
raise
2018-12-10 12:58:51 +01:00
else:
processed_data = service_data
2018-12-10 12:58:51 +01:00
service_call = ServiceCall(domain, service, processed_data, context)
2019-07-31 21:25:30 +02:00
self._hass.bus.async_fire(
EVENT_CALL_SERVICE,
{
ATTR_DOMAIN: domain.lower(),
ATTR_SERVICE: service.lower(),
ATTR_SERVICE_DATA: service_data,
},
context=context,
)
coro = self._execute_service(handler, service_call)
if not blocking:
self._run_service_in_background(coro, service_call)
return None
task = self._hass.async_create_task(coro)
try:
await asyncio.wait({task}, timeout=limit)
except asyncio.CancelledError:
# Task calling us was cancelled, so cancel service call task, and wait for
# it to be cancelled, within reason, before leaving.
_LOGGER.debug("Service call was cancelled: %s", service_call)
task.cancel()
await asyncio.wait({task}, timeout=SERVICE_CALL_LIMIT)
raise
if task.cancelled():
# Service call task was cancelled some other way, such as during shutdown.
_LOGGER.debug("Service was cancelled: %s", service_call)
raise asyncio.CancelledError
if task.done():
# Propagate any exceptions that might have happened during service call.
task.result()
# Service call completed successfully!
return True
# Service call task did not complete before timeout expired.
# Let it keep running in background.
self._run_service_in_background(task, service_call)
_LOGGER.debug("Service did not complete before timeout: %s", service_call)
return False
def _run_service_in_background(
2021-03-17 17:34:55 +01:00
self, coro_or_task: Coroutine | asyncio.Task, service_call: ServiceCall
) -> None:
"""Run service call in background, catching and logging any exceptions."""
async def catch_exceptions() -> None:
try:
await coro_or_task
except Unauthorized:
_LOGGER.warning(
"Unauthorized service called %s/%s",
service_call.domain,
service_call.service,
)
except asyncio.CancelledError:
_LOGGER.debug("Service was cancelled: %s", service_call)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error executing service: %s", service_call)
self._hass.async_create_task(catch_exceptions())
2019-07-31 21:25:30 +02:00
async def _execute_service(
self, handler: Service, service_call: ServiceCall
) -> None:
"""Execute a service."""
if handler.job.job_type == HassJobType.Coroutinefunction:
await handler.job.target(service_call)
elif handler.job.job_type == HassJobType.Callback:
handler.job.target(service_call)
else:
await self._hass.async_add_executor_job(handler.job.target, service_call)
2013-11-11 01:46:48 +01:00
class Config:
2015-12-28 06:14:35 +01:00
"""Configuration settings for Home Assistant."""
def __init__(self, hass: HomeAssistant) -> None:
2015-12-28 06:14:35 +01:00
"""Initialize a new config object."""
self.hass = hass
self.latitude: float = 0
self.longitude: float = 0
self.elevation: int = 0
self.location_name: str = "Home"
self.time_zone: str = "UTC"
self.units: UnitSystem = METRIC_SYSTEM
2021-03-17 17:34:55 +01:00
self.internal_url: str | None = None
self.external_url: str | None = None
self.currency: str = "EUR"
self.config_source: str = "default"
# If True, pip install is skipped for requirements on startup
self.skip_pip: bool = False
# List of loaded components
2021-03-17 17:34:55 +01:00
self.components: set[str] = set()
# API (HTTP) server configuration, see components.http.ApiConfig
2021-03-17 17:34:55 +01:00
self.api: Any | None = None
# Directory that holds the configuration
2021-03-17 17:34:55 +01:00
self.config_dir: str | None = None
# List of allowed external dirs to access
2021-03-17 17:34:55 +01:00
self.allowlist_external_dirs: set[str] = set()
# List of allowed external URLs that integrations may use
2021-03-17 17:34:55 +01:00
self.allowlist_external_urls: set[str] = set()
# Dictionary of Media folders that integrations may use
2021-03-17 17:34:55 +01:00
self.media_dirs: dict[str, str] = {}
# If Home Assistant is running in safe mode
self.safe_mode: bool = False
# Use legacy template behavior
self.legacy_templates: bool = False
2021-03-17 17:34:55 +01:00
def distance(self, lat: float, lon: float) -> float | None:
"""Calculate distance from Home Assistant.
Async friendly.
"""
Add unit system support Add unit symbol constants Initial unit system object Import more constants Pydoc for unit system file Import constants for configuration validation Unit system validation method Typing for constants Inches are valid lengths too Typings Change base class to dict - needed for remote api call serialization Validation Use dictionary keys Defined unit systems Update location util to use metric instead of us fahrenheit Update constant imports Import defined unit systems Update configuration to use unit system Update schema to use unit system Update constants Add imports to core for unit system and distance Type for config Default unit system Convert distance from HASS instance Update temperature conversion to use unit system Update temperature conversion Set unit system based on configuration Set info unit system Return unit system dictionary with config dictionary Auto discover unit system Update location test for use metric Update forecast unit system Update mold indicator unit system Update thermostat unit system Update thermostat demo test Unit tests around unit system Update test common hass configuration Update configuration unit tests There should always be a unit system! Update core unit tests Constants typing Linting issues Remove unused import Update fitbit sensor to use application unit system Update google travel time to use application unit system Update configuration example Update dht sensor Update DHT temperature conversion to use the utility function Update swagger config Update my sensors metric flag Update hvac component temperature conversion HVAC conversion for temperature Pull unit from sensor type map Pull unit from sensor type map Update the temper sensor unit Update yWeather sensor unit Update hvac demo unit test Set unit test config unit system to metric Use hass unit system length for default in proximity Use the name of the system instead of temperature Use constants from const Unused import Forecasted temperature Fix calculation in case furthest distance is greater than 1000000 units Remove unneeded constants Set default length to km or miles Use constants Linting doesn't like importing just for typing Fix reference Test is expecting meters - set config to meters Use constant Use constant PyDoc for unit test Should be not in Rename to units Change unit system to be an object - not a dictionary Return tuple in conversion Move convert to temperature util Temperature conversion is now in unit system Update imports Rename to units Units is now an object Use temperature util conversion Unit system is now an object Validate and convert unit system config Return the scalar value in template distance Test is expecting meters Update unit tests around unit system Distance util returns tuple Fix location info test Set units Update unit tests Convert distance DOH Pull out the scalar from the vector Linting I really hate python linting Linting again BLARG Unit test documentation Unit test around is metric flag Break ternary statement into if/else blocks Don't use dictionary - use members is metric flag Rename constants Use is metric flag Move constants to CONST file Move to const file Raise error if unit is not expected Typing No need to return unit since only performing conversion if it can work Use constants Line wrapping Raise error if invalid value Remove subscripts from conversion as they are no longer returned as tuples No longer tuples No longer tuples Check for numeric type Fix string format to use correct variable Typing Assert errors raised Remove subscript Only convert temperature if we know the unit If no unit of measurement set - default to HASS config Convert only if we know the unit Remove subscription Fix not in clause Linting fixes Wants a boolean Clearer if-block Check if the key is in the config first Missed a couple expecting tuples Backwards compatibility No like-y ternary! Error handling around state setting Pretty unit system configuration validation More tuple crap Use is metric flag Error handling around min/max temp Explode if no unit Pull unit from config Celsius has a decimal Unused import Check if it's a temperature before we try to convert it to a temperature Linting says too many statements - combine lat/long in a fairly reasonable manner Backwards compatibility unit test Better doc
2016-07-31 22:24:49 +02:00
return self.units.length(
2020-04-12 21:44:56 +02:00
location.distance(self.latitude, self.longitude, lat, lon), LENGTH_METERS
2019-07-31 21:25:30 +02:00
)
2015-09-20 18:35:03 +02:00
def path(self, *path: str) -> str:
"""Generate path to the file within the configuration directory.
Async friendly.
"""
2016-08-09 05:21:40 +02:00
if self.config_dir is None:
raise HomeAssistantError("config_dir is not set")
return os.path.join(self.config_dir, *path)
def is_allowed_external_url(self, url: str) -> bool:
"""Check if an external URL is allowed."""
parsed_url = f"{str(yarl.URL(url))}/"
return any(
allowed
for allowed in self.allowlist_external_urls
if parsed_url.startswith(allowed)
)
def is_allowed_path(self, path: str) -> bool:
"""Check if the path is valid for access from outside."""
assert path is not None
thepath = pathlib.Path(path)
try:
# The file path does not have to exist (it's parent should)
if thepath.exists():
thepath = thepath.resolve()
else:
thepath = thepath.parent.resolve()
except (FileNotFoundError, RuntimeError, PermissionError):
return False
2020-07-13 17:43:11 +02:00
for allowed_path in self.allowlist_external_dirs:
try:
2020-07-13 17:43:11 +02:00
thepath.relative_to(allowed_path)
return True
except ValueError:
pass
return False
2021-03-17 17:34:55 +01:00
def as_dict(self) -> dict:
"""Create a dictionary representation of the configuration.
Async friendly.
"""
2015-05-02 03:24:32 +02:00
return {
2019-07-31 21:25:30 +02:00
"latitude": self.latitude,
"longitude": self.longitude,
"elevation": self.elevation,
"unit_system": self.units.as_dict(),
"location_name": self.location_name,
"time_zone": self.time_zone,
2019-07-31 21:25:30 +02:00
"components": self.components,
"config_dir": self.config_dir,
2020-07-13 17:43:11 +02:00
# legacy, backwards compat
"whitelist_external_dirs": self.allowlist_external_dirs,
"allowlist_external_dirs": self.allowlist_external_dirs,
"allowlist_external_urls": self.allowlist_external_urls,
2019-07-31 21:25:30 +02:00
"version": __version__,
"config_source": self.config_source,
"safe_mode": self.safe_mode,
"state": self.hass.state.value,
"external_url": self.external_url,
"internal_url": self.internal_url,
"currency": self.currency,
2015-05-02 03:24:32 +02:00
}
def set_time_zone(self, time_zone_str: str) -> None:
"""Help to set the time zone."""
time_zone = dt_util.get_time_zone(time_zone_str)
if time_zone:
self.time_zone = time_zone_str
dt_util.set_default_time_zone(time_zone)
else:
raise ValueError(f"Received invalid time zone {time_zone_str}")
@callback
2019-07-31 21:25:30 +02:00
def _update(
self,
*,
source: str,
2021-03-17 17:34:55 +01:00
latitude: float | None = None,
longitude: float | None = None,
elevation: int | None = None,
unit_system: str | None = None,
location_name: str | None = None,
time_zone: str | None = None,
2020-08-29 07:59:24 +02:00
# pylint: disable=dangerous-default-value # _UNDEFs not modified
2021-03-17 17:34:55 +01:00
external_url: str | dict | None = _UNDEF,
internal_url: str | dict | None = _UNDEF,
currency: str | None = None,
2019-07-31 21:25:30 +02:00
) -> None:
"""Update the configuration from a dictionary."""
self.config_source = source
if latitude is not None:
self.latitude = latitude
if longitude is not None:
self.longitude = longitude
if elevation is not None:
self.elevation = elevation
if unit_system is not None:
if unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
self.units = IMPERIAL_SYSTEM
else:
self.units = METRIC_SYSTEM
if location_name is not None:
self.location_name = location_name
if time_zone is not None:
self.set_time_zone(time_zone)
if external_url is not _UNDEF:
self.external_url = cast(Optional[str], external_url)
if internal_url is not _UNDEF:
self.internal_url = cast(Optional[str], internal_url)
if currency is not None:
self.currency = currency
async def async_update(self, **kwargs: Any) -> None:
"""Update the configuration from a dictionary."""
self._update(source=SOURCE_STORAGE, **kwargs)
await self.async_store()
2019-07-31 21:25:30 +02:00
self.hass.bus.async_fire(EVENT_CORE_CONFIG_UPDATE, kwargs)
async def async_load(self) -> None:
"""Load [homeassistant] core config."""
store = self.hass.helpers.storage.Store(
2019-07-31 21:25:30 +02:00
CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
)
data = await store.async_load()
if not data:
return
# In 2021.9 we fixed validation to disallow a path (because that's never correct)
# but this data still lives in storage, so we print a warning.
if data.get("external_url") and urlparse(data["external_url"]).path not in (
"",
"/",
):
_LOGGER.warning("Invalid external_url set. It's not allowed to have a path")
if data.get("internal_url") and urlparse(data["internal_url"]).path not in (
"",
"/",
):
_LOGGER.warning("Invalid internal_url set. It's not allowed to have a path")
self._update(
source=SOURCE_STORAGE,
latitude=data.get("latitude"),
longitude=data.get("longitude"),
elevation=data.get("elevation"),
unit_system=data.get("unit_system"),
location_name=data.get("location_name"),
time_zone=data.get("time_zone"),
external_url=data.get("external_url", _UNDEF),
internal_url=data.get("internal_url", _UNDEF),
currency=data.get("currency"),
)
async def async_store(self) -> None:
"""Store [homeassistant] core config."""
data = {
2019-07-31 21:25:30 +02:00
"latitude": self.latitude,
"longitude": self.longitude,
"elevation": self.elevation,
"unit_system": self.units.name,
"location_name": self.location_name,
"time_zone": self.time_zone,
"external_url": self.external_url,
"internal_url": self.internal_url,
"currency": self.currency,
}
store = self.hass.helpers.storage.Store(
2019-07-31 21:25:30 +02:00
CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
)
await store.async_save(data)
def _async_create_timer(hass: HomeAssistant) -> None:
2015-12-28 06:14:35 +01:00
"""Create a timer that will start on HOMEASSISTANT_START."""
handle = None
timer_context = Context()
def schedule_tick(now: datetime.datetime) -> None:
"""Schedule a timer tick when the next second rolls around."""
nonlocal handle
2019-07-31 21:25:30 +02:00
slp_seconds = 1 - (now.microsecond / 10 ** 6)
target = monotonic() + slp_seconds
handle = hass.loop.call_later(slp_seconds, fire_time_event, target)
@callback
def fire_time_event(target: float) -> None:
"""Fire next time event."""
now = dt_util.utcnow()
hass.bus.async_fire(
EVENT_TIME_CHANGED, {ATTR_NOW: now}, time_fired=now, context=timer_context
)
# If we are more than a second late, a tick was missed
late = monotonic() - target
if late > 1:
hass.bus.async_fire(
EVENT_TIMER_OUT_OF_SYNC,
{ATTR_SECONDS: late},
time_fired=now,
context=timer_context,
)
schedule_tick(now)
@callback
def stop_timer(_: Event) -> None:
"""Stop the timer."""
if handle is not None:
handle.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)
_LOGGER.info("Timer:starting")
schedule_tick(dt_util.utcnow())