Compare commits
114 Commits
Author | SHA1 | Date |
---|---|---|
dependabot[bot] | 0d05a6eae3 | |
dependabot[bot] | 430aef68c6 | |
Stefan Agner | eac6070e12 | |
dependabot[bot] | 6693b7c2e6 | |
dependabot[bot] | 7898c3e433 | |
dependabot[bot] | 420ecd064e | |
dependabot[bot] | 4289be53f8 | |
dependabot[bot] | 29b41b564e | |
dependabot[bot] | 998eb69583 | |
Stefan Agner | 8ebc097ff4 | |
Mike Degatano | c05984ca49 | |
dependabot[bot] | 1a700c3013 | |
dependabot[bot] | a9c92cdec8 | |
dependabot[bot] | da8b938d5b | |
dependabot[bot] | 71e91328f1 | |
dependabot[bot] | 6356be4c52 | |
dependabot[bot] | e26e5440b6 | |
dependabot[bot] | fecfbd1a3e | |
dependabot[bot] | c00d6dfc76 | |
dependabot[bot] | 85be66d90d | |
Mike Degatano | 1ac506b391 | |
dependabot[bot] | f7738b77de | |
Mike Degatano | 824037bb7d | |
Stefan Agner | 221292ad14 | |
dependabot[bot] | 16f8c75e9f | |
dependabot[bot] | 90a37079f1 | |
J. Nick Koston | 798092af5e | |
Jan Čermák | 2a622a929d | |
dependabot[bot] | ca8eeaa68c | |
dependabot[bot] | d1b8ac1249 | |
dependabot[bot] | 3f629c4d60 | |
dependabot[bot] | 3fa910e68b | |
dependabot[bot] | e3cf2989c9 | |
dependabot[bot] | 136b2f402d | |
Mike Degatano | 8d18d2d9c6 | |
Mike Degatano | f18213361a | |
Jan Čermák | 18d9d32bca | |
dependabot[bot] | 1246e429c9 | |
dependabot[bot] | 77bc46bc37 | |
dependabot[bot] | ce16963c94 | |
dependabot[bot] | a70e8cfe58 | |
dependabot[bot] | ba922a1aaa | |
dependabot[bot] | b09230a884 | |
dependabot[bot] | f1cb9ca08e | |
Mike Degatano | 06513e88c6 | |
dependabot[bot] | b4a79bd068 | |
Mike Degatano | dfd8fe84e0 | |
J. Nick Koston | 4857c2e243 | |
dependabot[bot] | 7d384f6160 | |
Mike Degatano | 672a7621f9 | |
Mike Degatano | f0e2fb3f57 | |
dependabot[bot] | 8c3a520512 | |
dependabot[bot] | 22e50d56db | |
Mike Degatano | a0735f3585 | |
Mike Degatano | 50a2e8fde3 | |
dependabot[bot] | 55ed63cc79 | |
dependabot[bot] | 97e9dfff3f | |
dependabot[bot] | 501c9579fb | |
dependabot[bot] | f9aedadee6 | |
Stefan Agner | c3c17b2bc3 | |
Jan Čermák | a894c4589e | |
dependabot[bot] | 56a8a1b5a1 | |
dependabot[bot] | be3f7a6c37 | |
Jan Čermák | 906e400ab7 | |
Stefan Agner | a9265afd4c | |
Stefan Agner | d26058ac80 | |
dependabot[bot] | ebd1f30606 | |
dependabot[bot] | c78e077649 | |
dependabot[bot] | 07619223b0 | |
dependabot[bot] | 25c326ec6c | |
dependabot[bot] | df167b94c2 | |
dependabot[bot] | 3730908881 | |
dependabot[bot] | 975dc1bc11 | |
dependabot[bot] | 31409f0c32 | |
dependabot[bot] | b19273227b | |
dependabot[bot] | f89179fb03 | |
Mike Degatano | 90c971f9f1 | |
Jan Čermák | d685780a4a | |
dependabot[bot] | b6bc8b7b7c | |
dependabot[bot] | 92daba898f | |
Mike Degatano | 138843591e | |
Jan Čermák | 0814552b2a | |
Mike Degatano | 0e0fadd72d | |
dependabot[bot] | 5426bd4392 | |
James Ross | 3520a65099 | |
Stefan Agner | b15a5c2c87 | |
Mike Degatano | a8af04ff82 | |
Mike Degatano | 2148de45a0 | |
dependabot[bot] | c4143dacee | |
dependabot[bot] | a8025e77b3 | |
dependabot[bot] | dd1e76be93 | |
dependabot[bot] | 36f997959a | |
dependabot[bot] | c1faed163a | |
Mike Degatano | 9ca927dbe7 | |
dependabot[bot] | 02c6011818 | |
dependabot[bot] | 2e96b16396 | |
dependabot[bot] | 53b8de6c1c | |
dependabot[bot] | daea9f893c | |
dependabot[bot] | d1b5b1734c | |
Mike Degatano | 74a5899626 | |
Mike Degatano | 202ebf6d4e | |
Mike Degatano | 2c7b417e25 | |
Stefan Agner | bb5e138134 | |
dependabot[bot] | 3a2c3e2f84 | |
dependabot[bot] | d5be0c34ac | |
dependabot[bot] | ea5431ef2b | |
dependabot[bot] | 9c4cdcd11f | |
dependabot[bot] | e5ef6333e4 | |
Mike Degatano | 98779a48b1 | |
Mike Degatano | 9d4848ee77 | |
Mike Degatano | 5126820619 | |
Mike Degatano | 8b5c808e8c | |
dependabot[bot] | 9c75996c40 | |
dependabot[bot] | d524778e42 |
|
@ -53,7 +53,7 @@ jobs:
|
|||
requirements: ${{ steps.requirements.outputs.changed }}
|
||||
steps:
|
||||
- name: Checkout the repository
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
@ -92,7 +92,7 @@ jobs:
|
|||
arch: ${{ fromJson(needs.init.outputs.architectures) }}
|
||||
steps:
|
||||
- name: Checkout the repository
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
@ -125,15 +125,15 @@ jobs:
|
|||
|
||||
- name: Set up Python ${{ env.DEFAULT_PYTHON }}
|
||||
if: needs.init.outputs.publish == 'true'
|
||||
uses: actions/setup-python@v5.0.0
|
||||
uses: actions/setup-python@v5.1.0
|
||||
with:
|
||||
python-version: ${{ env.DEFAULT_PYTHON }}
|
||||
|
||||
- name: Install Cosign
|
||||
if: needs.init.outputs.publish == 'true'
|
||||
uses: sigstore/cosign-installer@v3.4.0
|
||||
uses: sigstore/cosign-installer@v3.5.0
|
||||
with:
|
||||
cosign-release: "v2.0.2"
|
||||
cosign-release: "v2.2.3"
|
||||
|
||||
- name: Install dirhash and calc hash
|
||||
if: needs.init.outputs.publish == 'true'
|
||||
|
@ -149,7 +149,7 @@ jobs:
|
|||
|
||||
- name: Login to GitHub Container Registry
|
||||
if: needs.init.outputs.publish == 'true'
|
||||
uses: docker/login-action@v3.0.0
|
||||
uses: docker/login-action@v3.1.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
|
@ -160,7 +160,7 @@ jobs:
|
|||
run: echo "BUILD_ARGS=--test" >> $GITHUB_ENV
|
||||
|
||||
- name: Build supervisor
|
||||
uses: home-assistant/builder@2024.01.0
|
||||
uses: home-assistant/builder@2024.03.5
|
||||
with:
|
||||
args: |
|
||||
$BUILD_ARGS \
|
||||
|
@ -178,7 +178,7 @@ jobs:
|
|||
steps:
|
||||
- name: Checkout the repository
|
||||
if: needs.init.outputs.publish == 'true'
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
|
||||
- name: Initialize git
|
||||
if: needs.init.outputs.publish == 'true'
|
||||
|
@ -203,11 +203,11 @@ jobs:
|
|||
timeout-minutes: 60
|
||||
steps:
|
||||
- name: Checkout the repository
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
|
||||
- name: Build the Supervisor
|
||||
if: needs.init.outputs.publish != 'true'
|
||||
uses: home-assistant/builder@2024.01.0
|
||||
uses: home-assistant/builder@2024.03.5
|
||||
with:
|
||||
args: |
|
||||
--test \
|
||||
|
|
|
@ -25,15 +25,15 @@ jobs:
|
|||
name: Prepare Python dependencies
|
||||
steps:
|
||||
- name: Check out code from GitHub
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
- name: Set up Python
|
||||
id: python
|
||||
uses: actions/setup-python@v5.0.0
|
||||
uses: actions/setup-python@v5.1.0
|
||||
with:
|
||||
python-version: ${{ env.DEFAULT_PYTHON }}
|
||||
- name: Restore Python virtual environment
|
||||
id: cache-venv
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: venv
|
||||
key: |
|
||||
|
@ -47,7 +47,7 @@ jobs:
|
|||
pip install -r requirements.txt -r requirements_tests.txt
|
||||
- name: Restore pre-commit environment from cache
|
||||
id: cache-precommit
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: ${{ env.PRE_COMMIT_CACHE }}
|
||||
lookup-only: true
|
||||
|
@ -67,15 +67,15 @@ jobs:
|
|||
needs: prepare
|
||||
steps:
|
||||
- name: Check out code from GitHub
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
|
||||
uses: actions/setup-python@v5.0.0
|
||||
uses: actions/setup-python@v5.1.0
|
||||
id: python
|
||||
with:
|
||||
python-version: ${{ needs.prepare.outputs.python-version }}
|
||||
- name: Restore Python virtual environment
|
||||
id: cache-venv
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: venv
|
||||
key: |
|
||||
|
@ -87,7 +87,7 @@ jobs:
|
|||
exit 1
|
||||
- name: Restore pre-commit environment from cache
|
||||
id: cache-precommit
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: ${{ env.PRE_COMMIT_CACHE }}
|
||||
key: |
|
||||
|
@ -110,15 +110,15 @@ jobs:
|
|||
needs: prepare
|
||||
steps:
|
||||
- name: Check out code from GitHub
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
|
||||
uses: actions/setup-python@v5.0.0
|
||||
uses: actions/setup-python@v5.1.0
|
||||
id: python
|
||||
with:
|
||||
python-version: ${{ needs.prepare.outputs.python-version }}
|
||||
- name: Restore Python virtual environment
|
||||
id: cache-venv
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: venv
|
||||
key: |
|
||||
|
@ -130,7 +130,7 @@ jobs:
|
|||
exit 1
|
||||
- name: Restore pre-commit environment from cache
|
||||
id: cache-precommit
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: ${{ env.PRE_COMMIT_CACHE }}
|
||||
key: |
|
||||
|
@ -153,7 +153,7 @@ jobs:
|
|||
needs: prepare
|
||||
steps:
|
||||
- name: Check out code from GitHub
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
- name: Register hadolint problem matcher
|
||||
run: |
|
||||
echo "::add-matcher::.github/workflows/matchers/hadolint.json"
|
||||
|
@ -168,15 +168,15 @@ jobs:
|
|||
needs: prepare
|
||||
steps:
|
||||
- name: Check out code from GitHub
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
|
||||
uses: actions/setup-python@v5.0.0
|
||||
uses: actions/setup-python@v5.1.0
|
||||
id: python
|
||||
with:
|
||||
python-version: ${{ needs.prepare.outputs.python-version }}
|
||||
- name: Restore Python virtual environment
|
||||
id: cache-venv
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: venv
|
||||
key: |
|
||||
|
@ -188,7 +188,7 @@ jobs:
|
|||
exit 1
|
||||
- name: Restore pre-commit environment from cache
|
||||
id: cache-precommit
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: ${{ env.PRE_COMMIT_CACHE }}
|
||||
key: |
|
||||
|
@ -212,15 +212,15 @@ jobs:
|
|||
needs: prepare
|
||||
steps:
|
||||
- name: Check out code from GitHub
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
|
||||
uses: actions/setup-python@v5.0.0
|
||||
uses: actions/setup-python@v5.1.0
|
||||
id: python
|
||||
with:
|
||||
python-version: ${{ needs.prepare.outputs.python-version }}
|
||||
- name: Restore Python virtual environment
|
||||
id: cache-venv
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: venv
|
||||
key: |
|
||||
|
@ -232,7 +232,7 @@ jobs:
|
|||
exit 1
|
||||
- name: Restore pre-commit environment from cache
|
||||
id: cache-precommit
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: ${{ env.PRE_COMMIT_CACHE }}
|
||||
key: |
|
||||
|
@ -256,15 +256,15 @@ jobs:
|
|||
needs: prepare
|
||||
steps:
|
||||
- name: Check out code from GitHub
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
|
||||
uses: actions/setup-python@v5.0.0
|
||||
uses: actions/setup-python@v5.1.0
|
||||
id: python
|
||||
with:
|
||||
python-version: ${{ needs.prepare.outputs.python-version }}
|
||||
- name: Restore Python virtual environment
|
||||
id: cache-venv
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: venv
|
||||
key: |
|
||||
|
@ -288,19 +288,19 @@ jobs:
|
|||
name: Run tests Python ${{ needs.prepare.outputs.python-version }}
|
||||
steps:
|
||||
- name: Check out code from GitHub
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
|
||||
uses: actions/setup-python@v5.0.0
|
||||
uses: actions/setup-python@v5.1.0
|
||||
id: python
|
||||
with:
|
||||
python-version: ${{ needs.prepare.outputs.python-version }}
|
||||
- name: Install Cosign
|
||||
uses: sigstore/cosign-installer@v3.4.0
|
||||
uses: sigstore/cosign-installer@v3.5.0
|
||||
with:
|
||||
cosign-release: "v2.0.2"
|
||||
cosign-release: "v2.2.3"
|
||||
- name: Restore Python virtual environment
|
||||
id: cache-venv
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: venv
|
||||
key: |
|
||||
|
@ -335,7 +335,7 @@ jobs:
|
|||
-o console_output_style=count \
|
||||
tests
|
||||
- name: Upload coverage artifact
|
||||
uses: actions/upload-artifact@v4.3.1
|
||||
uses: actions/upload-artifact@v4.3.3
|
||||
with:
|
||||
name: coverage-${{ matrix.python-version }}
|
||||
path: .coverage
|
||||
|
@ -346,15 +346,15 @@ jobs:
|
|||
needs: ["pytest", "prepare"]
|
||||
steps:
|
||||
- name: Check out code from GitHub
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
|
||||
uses: actions/setup-python@v5.0.0
|
||||
uses: actions/setup-python@v5.1.0
|
||||
id: python
|
||||
with:
|
||||
python-version: ${{ needs.prepare.outputs.python-version }}
|
||||
- name: Restore Python virtual environment
|
||||
id: cache-venv
|
||||
uses: actions/cache@v4.0.0
|
||||
uses: actions/cache@v4.0.2
|
||||
with:
|
||||
path: venv
|
||||
key: |
|
||||
|
@ -365,7 +365,7 @@ jobs:
|
|||
echo "Failed to restore Python virtual environment from cache"
|
||||
exit 1
|
||||
- name: Download all coverage artifacts
|
||||
uses: actions/download-artifact@v4.1.3
|
||||
uses: actions/download-artifact@v4.1.7
|
||||
- name: Combine coverage results
|
||||
run: |
|
||||
. venv/bin/activate
|
||||
|
@ -373,4 +373,4 @@ jobs:
|
|||
coverage report
|
||||
coverage xml
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v4.1.0
|
||||
uses: codecov/codecov-action@v4.4.0
|
||||
|
|
|
@ -11,7 +11,7 @@ jobs:
|
|||
name: Release Drafter
|
||||
steps:
|
||||
- name: Checkout the repository
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out code from GitHub
|
||||
uses: actions/checkout@v4.1.1
|
||||
uses: actions/checkout@v4.1.6
|
||||
- name: Sentry Release
|
||||
uses: getsentry/action-release@v1.7.0
|
||||
env:
|
||||
|
|
12
build.yaml
12
build.yaml
|
@ -1,10 +1,10 @@
|
|||
image: ghcr.io/home-assistant/{arch}-hassio-supervisor
|
||||
build_from:
|
||||
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.12-alpine3.18
|
||||
armhf: ghcr.io/home-assistant/armhf-base-python:3.12-alpine3.18
|
||||
armv7: ghcr.io/home-assistant/armv7-base-python:3.12-alpine3.18
|
||||
amd64: ghcr.io/home-assistant/amd64-base-python:3.12-alpine3.18
|
||||
i386: ghcr.io/home-assistant/i386-base-python:3.12-alpine3.18
|
||||
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.12-alpine3.19
|
||||
armhf: ghcr.io/home-assistant/armhf-base-python:3.12-alpine3.19
|
||||
armv7: ghcr.io/home-assistant/armv7-base-python:3.12-alpine3.19
|
||||
amd64: ghcr.io/home-assistant/amd64-base-python:3.12-alpine3.19
|
||||
i386: ghcr.io/home-assistant/i386-base-python:3.12-alpine3.19
|
||||
codenotary:
|
||||
signer: notary@home-assistant.io
|
||||
base_image: notary@home-assistant.io
|
||||
|
@ -12,7 +12,7 @@ cosign:
|
|||
base_identity: https://github.com/home-assistant/docker-base/.*
|
||||
identity: https://github.com/home-assistant/supervisor/.*
|
||||
args:
|
||||
COSIGN_VERSION: 2.0.2
|
||||
COSIGN_VERSION: 2.2.3
|
||||
labels:
|
||||
io.hass.type: supervisor
|
||||
org.opencontainers.image.title: Home Assistant Supervisor
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
aiodns==3.1.1
|
||||
aiohttp==3.9.3
|
||||
aiodns==3.2.0
|
||||
aiohttp==3.9.5
|
||||
aiohttp-fast-url-dispatcher==0.3.0
|
||||
atomicwrites-homeassistant==1.4.1
|
||||
attrs==23.2.0
|
||||
|
@ -8,22 +8,22 @@ brotli==1.1.0
|
|||
ciso8601==2.3.1
|
||||
colorlog==6.8.2
|
||||
cpe==1.2.1
|
||||
cryptography==42.0.5
|
||||
cryptography==42.0.7
|
||||
debugpy==1.8.1
|
||||
deepmerge==1.1.1
|
||||
dirhash==0.2.1
|
||||
dirhash==0.4.0
|
||||
docker==7.0.0
|
||||
faust-cchardet==2.1.19
|
||||
gitpython==3.1.42
|
||||
jinja2==3.1.3
|
||||
gitpython==3.1.43
|
||||
jinja2==3.1.4
|
||||
orjson==3.9.15
|
||||
pulsectl==23.5.2
|
||||
pyudev==0.24.1
|
||||
pulsectl==24.4.0
|
||||
pyudev==0.24.3
|
||||
PyYAML==6.0.1
|
||||
securetar==2024.2.1
|
||||
sentry-sdk==1.40.5
|
||||
setuptools==69.1.1
|
||||
sentry-sdk==2.2.0
|
||||
setuptools==69.5.1
|
||||
voluptuous==0.14.2
|
||||
dbus-fast==2.21.1
|
||||
typing_extensions==4.10.0
|
||||
dbus-fast==2.21.2
|
||||
typing_extensions==4.11.0
|
||||
zlib-fast==0.2.0
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
coverage==7.4.3
|
||||
pre-commit==3.6.2
|
||||
pylint==3.1.0
|
||||
coverage==7.5.1
|
||||
pre-commit==3.7.1
|
||||
pylint==3.1.1
|
||||
pytest-aiohttp==1.0.5
|
||||
pytest-asyncio==0.23.5
|
||||
pytest-cov==4.1.0
|
||||
pytest-timeout==2.2.0
|
||||
pytest==8.0.1
|
||||
ruff==0.2.2
|
||||
time-machine==2.13.0
|
||||
typing_extensions==4.10.0
|
||||
pytest-asyncio==0.23.6
|
||||
pytest-cov==5.0.0
|
||||
pytest-timeout==2.3.1
|
||||
pytest==8.2.0
|
||||
ruff==0.4.4
|
||||
time-machine==2.14.1
|
||||
typing_extensions==4.11.0
|
||||
urllib3==2.2.1
|
||||
|
|
|
@ -180,6 +180,9 @@ class Addon(AddonModel):
|
|||
|
||||
async def load(self) -> None:
|
||||
"""Async initialize of object."""
|
||||
if self.is_detached:
|
||||
await super().refresh_path_cache()
|
||||
|
||||
self._listeners.append(
|
||||
self.sys_bus.register_event(
|
||||
BusEvent.DOCKER_CONTAINER_STATE_CHANGE, self.container_state_changed
|
||||
|
@ -192,9 +195,20 @@ class Addon(AddonModel):
|
|||
)
|
||||
|
||||
await self._check_ingress_port()
|
||||
with suppress(DockerError):
|
||||
default_image = self._image(self.data)
|
||||
try:
|
||||
await self.instance.attach(version=self.version)
|
||||
|
||||
# Ensure we are using correct image for this system
|
||||
await self.instance.check_image(self.version, default_image, self.arch)
|
||||
except DockerError:
|
||||
_LOGGER.info("No %s addon Docker image %s found", self.slug, self.image)
|
||||
with suppress(DockerError):
|
||||
await self.instance.install(self.version, default_image, arch=self.arch)
|
||||
|
||||
self.persist[ATTR_IMAGE] = default_image
|
||||
self.save_persist()
|
||||
|
||||
@property
|
||||
def ip_address(self) -> IPv4Address:
|
||||
"""Return IP of add-on instance."""
|
||||
|
@ -230,6 +244,34 @@ class Addon(AddonModel):
|
|||
"""Return True if add-on is detached."""
|
||||
return self.slug not in self.sys_store.data.addons
|
||||
|
||||
@property
|
||||
def with_icon(self) -> bool:
|
||||
"""Return True if an icon exists."""
|
||||
if self.is_detached:
|
||||
return super().with_icon
|
||||
return self.addon_store.with_icon
|
||||
|
||||
@property
|
||||
def with_logo(self) -> bool:
|
||||
"""Return True if a logo exists."""
|
||||
if self.is_detached:
|
||||
return super().with_logo
|
||||
return self.addon_store.with_logo
|
||||
|
||||
@property
|
||||
def with_changelog(self) -> bool:
|
||||
"""Return True if a changelog exists."""
|
||||
if self.is_detached:
|
||||
return super().with_changelog
|
||||
return self.addon_store.with_changelog
|
||||
|
||||
@property
|
||||
def with_documentation(self) -> bool:
|
||||
"""Return True if a documentation exists."""
|
||||
if self.is_detached:
|
||||
return super().with_documentation
|
||||
return self.addon_store.with_documentation
|
||||
|
||||
@property
|
||||
def available(self) -> bool:
|
||||
"""Return True if this add-on is available on this platform."""
|
||||
|
@ -687,7 +729,7 @@ class Addon(AddonModel):
|
|||
limit=JobExecutionLimit.GROUP_ONCE,
|
||||
on_condition=AddonsJobError,
|
||||
)
|
||||
async def uninstall(self) -> None:
|
||||
async def uninstall(self, *, remove_config: bool) -> None:
|
||||
"""Uninstall and cleanup this addon."""
|
||||
try:
|
||||
await self.instance.remove()
|
||||
|
@ -698,6 +740,10 @@ class Addon(AddonModel):
|
|||
|
||||
await self.unload()
|
||||
|
||||
# Remove config if present and requested
|
||||
if self.addon_config_used and remove_config:
|
||||
await remove_data(self.path_config)
|
||||
|
||||
# Cleanup audio settings
|
||||
if self.path_pulse.exists():
|
||||
with suppress(OSError):
|
||||
|
@ -1395,3 +1441,9 @@ class Addon(AddonModel):
|
|||
ContainerState.UNHEALTHY,
|
||||
]:
|
||||
await self._restart_after_problem(event.state)
|
||||
|
||||
def refresh_path_cache(self) -> Awaitable[None]:
|
||||
"""Refresh cache of existing paths."""
|
||||
if self.is_detached:
|
||||
return super().refresh_path_cache()
|
||||
return self.addon_store.refresh_path_cache()
|
||||
|
|
|
@ -102,11 +102,11 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
|
|||
except HassioArchNotFound:
|
||||
return False
|
||||
|
||||
def get_docker_args(self, version: AwesomeVersion):
|
||||
def get_docker_args(self, version: AwesomeVersion, image: str | None = None):
|
||||
"""Create a dict with Docker build arguments."""
|
||||
args = {
|
||||
"path": str(self.addon.path_location),
|
||||
"tag": f"{self.addon.image}:{version!s}",
|
||||
"tag": f"{image or self.addon.image}:{version!s}",
|
||||
"dockerfile": str(self.dockerfile),
|
||||
"pull": True,
|
||||
"forcerm": not self.sys_dev,
|
||||
|
|
|
@ -77,15 +77,20 @@ class AddonManager(CoreSysAttributes):
|
|||
|
||||
async def load(self) -> None:
|
||||
"""Start up add-on management."""
|
||||
tasks = []
|
||||
# Refresh cache for all store addons
|
||||
tasks: list[Awaitable[None]] = [
|
||||
store.refresh_path_cache() for store in self.store.values()
|
||||
]
|
||||
|
||||
# Load all installed addons
|
||||
for slug in self.data.system:
|
||||
addon = self.local[slug] = Addon(self.coresys, slug)
|
||||
tasks.append(self.sys_create_task(addon.load()))
|
||||
tasks.append(addon.load())
|
||||
|
||||
# Run initial tasks
|
||||
_LOGGER.info("Found %d installed add-ons", len(tasks))
|
||||
_LOGGER.info("Found %d installed add-ons", len(self.data.system))
|
||||
if tasks:
|
||||
await asyncio.wait(tasks)
|
||||
await asyncio.gather(*tasks)
|
||||
|
||||
# Sync DNS
|
||||
await self.sync_dns()
|
||||
|
@ -173,13 +178,13 @@ class AddonManager(CoreSysAttributes):
|
|||
|
||||
_LOGGER.info("Add-on '%s' successfully installed", slug)
|
||||
|
||||
async def uninstall(self, slug: str) -> None:
|
||||
async def uninstall(self, slug: str, *, remove_config: bool = False) -> None:
|
||||
"""Remove an add-on."""
|
||||
if slug not in self.local:
|
||||
_LOGGER.warning("Add-on %s is not installed", slug)
|
||||
return
|
||||
|
||||
await self.local[slug].uninstall()
|
||||
await self.local[slug].uninstall(remove_config=remove_config)
|
||||
|
||||
_LOGGER.info("Add-on '%s' successfully removed", slug)
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
"""Init file for Supervisor add-ons."""
|
||||
from abc import ABC, abstractmethod
|
||||
from collections import defaultdict
|
||||
from collections.abc import Callable
|
||||
from collections.abc import Awaitable, Callable
|
||||
from contextlib import suppress
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
@ -118,6 +118,10 @@ class AddonModel(JobGroup, ABC):
|
|||
coresys, JOB_GROUP_ADDON.format_map(defaultdict(str, slug=slug)), slug
|
||||
)
|
||||
self.slug: str = slug
|
||||
self._path_icon_exists: bool = False
|
||||
self._path_logo_exists: bool = False
|
||||
self._path_changelog_exists: bool = False
|
||||
self._path_documentation_exists: bool = False
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
|
@ -511,22 +515,22 @@ class AddonModel(JobGroup, ABC):
|
|||
@property
|
||||
def with_icon(self) -> bool:
|
||||
"""Return True if an icon exists."""
|
||||
return self.path_icon.exists()
|
||||
return self._path_icon_exists
|
||||
|
||||
@property
|
||||
def with_logo(self) -> bool:
|
||||
"""Return True if a logo exists."""
|
||||
return self.path_logo.exists()
|
||||
return self._path_logo_exists
|
||||
|
||||
@property
|
||||
def with_changelog(self) -> bool:
|
||||
"""Return True if a changelog exists."""
|
||||
return self.path_changelog.exists()
|
||||
return self._path_changelog_exists
|
||||
|
||||
@property
|
||||
def with_documentation(self) -> bool:
|
||||
"""Return True if a documentation exists."""
|
||||
return self.path_documentation.exists()
|
||||
return self._path_documentation_exists
|
||||
|
||||
@property
|
||||
def supported_arch(self) -> list[str]:
|
||||
|
@ -635,6 +639,17 @@ class AddonModel(JobGroup, ABC):
|
|||
"""Return breaking versions of addon."""
|
||||
return self.data[ATTR_BREAKING_VERSIONS]
|
||||
|
||||
def refresh_path_cache(self) -> Awaitable[None]:
|
||||
"""Refresh cache of existing paths."""
|
||||
|
||||
def check_paths():
|
||||
self._path_icon_exists = self.path_icon.exists()
|
||||
self._path_logo_exists = self.path_logo.exists()
|
||||
self._path_changelog_exists = self.path_changelog.exists()
|
||||
self._path_documentation_exists = self.path_documentation.exists()
|
||||
|
||||
return self.sys_run_in_executor(check_paths)
|
||||
|
||||
def validate_availability(self) -> None:
|
||||
"""Validate if addon is available for current system."""
|
||||
return self._validate_availability(self.data, logger=_LOGGER.error)
|
||||
|
|
|
@ -99,7 +99,6 @@ from ..const import (
|
|||
AddonStartup,
|
||||
AddonState,
|
||||
)
|
||||
from ..discovery.validate import valid_discovery_service
|
||||
from ..docker.const import Capabilities
|
||||
from ..validate import (
|
||||
docker_image,
|
||||
|
@ -190,20 +189,6 @@ def _warn_addon_config(config: dict[str, Any]):
|
|||
name,
|
||||
)
|
||||
|
||||
invalid_services: list[str] = []
|
||||
for service in config.get(ATTR_DISCOVERY, []):
|
||||
try:
|
||||
valid_discovery_service(service)
|
||||
except vol.Invalid:
|
||||
invalid_services.append(service)
|
||||
|
||||
if invalid_services:
|
||||
_LOGGER.warning(
|
||||
"Add-on lists the following unknown services for discovery: %s. Please report this to the maintainer of %s",
|
||||
", ".join(invalid_services),
|
||||
name,
|
||||
)
|
||||
|
||||
return config
|
||||
|
||||
|
||||
|
|
|
@ -9,12 +9,14 @@ from aiohttp_fast_url_dispatcher import FastUrlDispatcher, attach_fast_url_dispa
|
|||
|
||||
from ..const import AddonState
|
||||
from ..coresys import CoreSys, CoreSysAttributes
|
||||
from ..exceptions import APIAddonNotInstalled
|
||||
from ..exceptions import APIAddonNotInstalled, HostNotSupportedError
|
||||
from ..utils.sentry import capture_exception
|
||||
from .addons import APIAddons
|
||||
from .audio import APIAudio
|
||||
from .auth import APIAuth
|
||||
from .backups import APIBackups
|
||||
from .cli import APICli
|
||||
from .const import CONTENT_TYPE_TEXT
|
||||
from .discovery import APIDiscovery
|
||||
from .dns import APICoreDNS
|
||||
from .docker import APIDocker
|
||||
|
@ -36,7 +38,7 @@ from .security import APISecurity
|
|||
from .services import APIServices
|
||||
from .store import APIStore
|
||||
from .supervisor import APISupervisor
|
||||
from .utils import api_process
|
||||
from .utils import api_process, api_process_raw
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -71,8 +73,14 @@ class RestAPI(CoreSysAttributes):
|
|||
self._runner: web.AppRunner = web.AppRunner(self.webapp, shutdown_timeout=5)
|
||||
self._site: web.TCPSite | None = None
|
||||
|
||||
# share single host API handler for reuse in logging endpoints
|
||||
self._api_host: APIHost | None = None
|
||||
|
||||
async def load(self) -> None:
|
||||
"""Register REST API Calls."""
|
||||
self._api_host = APIHost()
|
||||
self._api_host.coresys = self.coresys
|
||||
|
||||
self._register_addons()
|
||||
self._register_audio()
|
||||
self._register_auth()
|
||||
|
@ -102,10 +110,41 @@ class RestAPI(CoreSysAttributes):
|
|||
|
||||
await self.start()
|
||||
|
||||
def _register_advanced_logs(self, path: str, syslog_identifier: str):
|
||||
"""Register logs endpoint for a given path, returning logs for single syslog identifier."""
|
||||
|
||||
self.webapp.add_routes(
|
||||
[
|
||||
web.get(
|
||||
f"{path}/logs",
|
||||
partial(self._api_host.advanced_logs, identifier=syslog_identifier),
|
||||
),
|
||||
web.get(
|
||||
f"{path}/logs/follow",
|
||||
partial(
|
||||
self._api_host.advanced_logs,
|
||||
identifier=syslog_identifier,
|
||||
follow=True,
|
||||
),
|
||||
),
|
||||
web.get(
|
||||
f"{path}/logs/boots/{{bootid}}",
|
||||
partial(self._api_host.advanced_logs, identifier=syslog_identifier),
|
||||
),
|
||||
web.get(
|
||||
f"{path}/logs/boots/{{bootid}}/follow",
|
||||
partial(
|
||||
self._api_host.advanced_logs,
|
||||
identifier=syslog_identifier,
|
||||
follow=True,
|
||||
),
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
def _register_host(self) -> None:
|
||||
"""Register hostcontrol functions."""
|
||||
api_host = APIHost()
|
||||
api_host.coresys = self.coresys
|
||||
api_host = self._api_host
|
||||
|
||||
self.webapp.add_routes(
|
||||
[
|
||||
|
@ -182,6 +221,8 @@ class RestAPI(CoreSysAttributes):
|
|||
web.post("/os/config/sync", api_os.config_sync),
|
||||
web.post("/os/datadisk/move", api_os.migrate_data),
|
||||
web.get("/os/datadisk/list", api_os.list_data),
|
||||
web.post("/os/datadisk/wipe", api_os.wipe_data),
|
||||
web.post("/os/boot-slot", api_os.set_boot_slot),
|
||||
]
|
||||
)
|
||||
|
||||
|
@ -259,11 +300,11 @@ class RestAPI(CoreSysAttributes):
|
|||
[
|
||||
web.get("/multicast/info", api_multicast.info),
|
||||
web.get("/multicast/stats", api_multicast.stats),
|
||||
web.get("/multicast/logs", api_multicast.logs),
|
||||
web.post("/multicast/update", api_multicast.update),
|
||||
web.post("/multicast/restart", api_multicast.restart),
|
||||
]
|
||||
)
|
||||
self._register_advanced_logs("/multicast", "hassio_multicast")
|
||||
|
||||
def _register_hardware(self) -> None:
|
||||
"""Register hardware functions."""
|
||||
|
@ -336,6 +377,7 @@ class RestAPI(CoreSysAttributes):
|
|||
web.post("/auth", api_auth.auth),
|
||||
web.post("/auth/reset", api_auth.reset),
|
||||
web.delete("/auth/cache", api_auth.cache),
|
||||
web.get("/auth/list", api_auth.list_users),
|
||||
]
|
||||
)
|
||||
|
||||
|
@ -349,7 +391,6 @@ class RestAPI(CoreSysAttributes):
|
|||
web.get("/supervisor/ping", api_supervisor.ping),
|
||||
web.get("/supervisor/info", api_supervisor.info),
|
||||
web.get("/supervisor/stats", api_supervisor.stats),
|
||||
web.get("/supervisor/logs", api_supervisor.logs),
|
||||
web.post("/supervisor/update", api_supervisor.update),
|
||||
web.post("/supervisor/reload", api_supervisor.reload),
|
||||
web.post("/supervisor/restart", api_supervisor.restart),
|
||||
|
@ -358,6 +399,38 @@ class RestAPI(CoreSysAttributes):
|
|||
]
|
||||
)
|
||||
|
||||
async def get_supervisor_logs(*args, **kwargs):
|
||||
try:
|
||||
return await self._api_host.advanced_logs_handler(
|
||||
*args, identifier="hassio_supervisor", **kwargs
|
||||
)
|
||||
except Exception as err: # pylint: disable=broad-exception-caught
|
||||
# Supervisor logs are critical, so catch everything, log the exception
|
||||
# and try to return Docker container logs as the fallback
|
||||
_LOGGER.exception(
|
||||
"Failed to get supervisor logs using advanced_logs API"
|
||||
)
|
||||
if not isinstance(err, HostNotSupportedError):
|
||||
# No need to capture HostNotSupportedError to Sentry, the cause
|
||||
# is known and reported to the user using the resolution center.
|
||||
capture_exception(err)
|
||||
return await api_supervisor.logs(*args, **kwargs)
|
||||
|
||||
self.webapp.add_routes(
|
||||
[
|
||||
web.get("/supervisor/logs", get_supervisor_logs),
|
||||
web.get(
|
||||
"/supervisor/logs/follow",
|
||||
partial(get_supervisor_logs, follow=True),
|
||||
),
|
||||
web.get("/supervisor/logs/boots/{bootid}", get_supervisor_logs),
|
||||
web.get(
|
||||
"/supervisor/logs/boots/{bootid}/follow",
|
||||
partial(get_supervisor_logs, follow=True),
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
def _register_homeassistant(self) -> None:
|
||||
"""Register Home Assistant functions."""
|
||||
api_hass = APIHomeAssistant()
|
||||
|
@ -366,7 +439,6 @@ class RestAPI(CoreSysAttributes):
|
|||
self.webapp.add_routes(
|
||||
[
|
||||
web.get("/core/info", api_hass.info),
|
||||
web.get("/core/logs", api_hass.logs),
|
||||
web.get("/core/stats", api_hass.stats),
|
||||
web.post("/core/options", api_hass.options),
|
||||
web.post("/core/update", api_hass.update),
|
||||
|
@ -378,11 +450,12 @@ class RestAPI(CoreSysAttributes):
|
|||
]
|
||||
)
|
||||
|
||||
self._register_advanced_logs("/core", "homeassistant")
|
||||
|
||||
# Reroute from legacy
|
||||
self.webapp.add_routes(
|
||||
[
|
||||
web.get("/homeassistant/info", api_hass.info),
|
||||
web.get("/homeassistant/logs", api_hass.logs),
|
||||
web.get("/homeassistant/stats", api_hass.stats),
|
||||
web.post("/homeassistant/options", api_hass.options),
|
||||
web.post("/homeassistant/restart", api_hass.restart),
|
||||
|
@ -394,6 +467,8 @@ class RestAPI(CoreSysAttributes):
|
|||
]
|
||||
)
|
||||
|
||||
self._register_advanced_logs("/homeassistant", "homeassistant")
|
||||
|
||||
def _register_proxy(self) -> None:
|
||||
"""Register Home Assistant API Proxy."""
|
||||
api_proxy = APIProxy()
|
||||
|
@ -440,13 +515,33 @@ class RestAPI(CoreSysAttributes):
|
|||
),
|
||||
web.get("/addons/{addon}/options/config", api_addons.options_config),
|
||||
web.post("/addons/{addon}/rebuild", api_addons.rebuild),
|
||||
web.get("/addons/{addon}/logs", api_addons.logs),
|
||||
web.post("/addons/{addon}/stdin", api_addons.stdin),
|
||||
web.post("/addons/{addon}/security", api_addons.security),
|
||||
web.get("/addons/{addon}/stats", api_addons.stats),
|
||||
]
|
||||
)
|
||||
|
||||
@api_process_raw(CONTENT_TYPE_TEXT, error_type=CONTENT_TYPE_TEXT)
|
||||
async def get_addon_logs(request, *args, **kwargs):
|
||||
addon = api_addons.get_addon_for_request(request)
|
||||
kwargs["identifier"] = f"addon_{addon.slug}"
|
||||
return await self._api_host.advanced_logs(request, *args, **kwargs)
|
||||
|
||||
self.webapp.add_routes(
|
||||
[
|
||||
web.get("/addons/{addon}/logs", get_addon_logs),
|
||||
web.get(
|
||||
"/addons/{addon}/logs/follow",
|
||||
partial(get_addon_logs, follow=True),
|
||||
),
|
||||
web.get("/addons/{addon}/logs/boots/{bootid}", get_addon_logs),
|
||||
web.get(
|
||||
"/addons/{addon}/logs/boots/{bootid}/follow",
|
||||
partial(get_addon_logs, follow=True),
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
# Legacy routing to support requests for not installed addons
|
||||
api_store = APIStore()
|
||||
api_store.coresys = self.coresys
|
||||
|
@ -544,7 +639,6 @@ class RestAPI(CoreSysAttributes):
|
|||
[
|
||||
web.get("/dns/info", api_dns.info),
|
||||
web.get("/dns/stats", api_dns.stats),
|
||||
web.get("/dns/logs", api_dns.logs),
|
||||
web.post("/dns/update", api_dns.update),
|
||||
web.post("/dns/options", api_dns.options),
|
||||
web.post("/dns/restart", api_dns.restart),
|
||||
|
@ -552,18 +646,17 @@ class RestAPI(CoreSysAttributes):
|
|||
]
|
||||
)
|
||||
|
||||
self._register_advanced_logs("/dns", "hassio_dns")
|
||||
|
||||
def _register_audio(self) -> None:
|
||||
"""Register Audio functions."""
|
||||
api_audio = APIAudio()
|
||||
api_audio.coresys = self.coresys
|
||||
api_host = APIHost()
|
||||
api_host.coresys = self.coresys
|
||||
|
||||
self.webapp.add_routes(
|
||||
[
|
||||
web.get("/audio/info", api_audio.info),
|
||||
web.get("/audio/stats", api_audio.stats),
|
||||
web.get("/audio/logs", api_audio.logs),
|
||||
web.post("/audio/update", api_audio.update),
|
||||
web.post("/audio/restart", api_audio.restart),
|
||||
web.post("/audio/reload", api_audio.reload),
|
||||
|
@ -576,6 +669,8 @@ class RestAPI(CoreSysAttributes):
|
|||
]
|
||||
)
|
||||
|
||||
self._register_advanced_logs("/audio", "hassio_audio")
|
||||
|
||||
def _register_mounts(self) -> None:
|
||||
"""Register mounts endpoints."""
|
||||
api_mounts = APIMounts()
|
||||
|
@ -602,7 +697,6 @@ class RestAPI(CoreSysAttributes):
|
|||
web.get("/store", api_store.store_info),
|
||||
web.get("/store/addons", api_store.addons_list),
|
||||
web.get("/store/addons/{addon}", api_store.addons_addon_info),
|
||||
web.get("/store/addons/{addon}/{version}", api_store.addons_addon_info),
|
||||
web.get("/store/addons/{addon}/icon", api_store.addons_addon_icon),
|
||||
web.get("/store/addons/{addon}/logo", api_store.addons_addon_logo),
|
||||
web.get(
|
||||
|
@ -624,6 +718,8 @@ class RestAPI(CoreSysAttributes):
|
|||
"/store/addons/{addon}/update/{version}",
|
||||
api_store.addons_addon_update,
|
||||
),
|
||||
# Must be below others since it has a wildcard in resource path
|
||||
web.get("/store/addons/{addon}/{version}", api_store.addons_addon_info),
|
||||
web.post("/store/reload", api_store.reload),
|
||||
web.get("/store/repositories", api_store.repositories_list),
|
||||
web.get(
|
||||
|
|
|
@ -106,8 +106,8 @@ from ..exceptions import (
|
|||
PwnedSecret,
|
||||
)
|
||||
from ..validate import docker_ports
|
||||
from .const import ATTR_SIGNED, CONTENT_TYPE_BINARY
|
||||
from .utils import api_process, api_process_raw, api_validate, json_loads
|
||||
from .const import ATTR_REMOVE_CONFIG, ATTR_SIGNED
|
||||
from .utils import api_process, api_validate, json_loads
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -126,15 +126,19 @@ SCHEMA_OPTIONS = vol.Schema(
|
|||
}
|
||||
)
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA_SECURITY = vol.Schema({vol.Optional(ATTR_PROTECTED): vol.Boolean()})
|
||||
|
||||
SCHEMA_UNINSTALL = vol.Schema(
|
||||
{vol.Optional(ATTR_REMOVE_CONFIG, default=False): vol.Boolean()}
|
||||
)
|
||||
# pylint: enable=no-value-for-parameter
|
||||
|
||||
|
||||
class APIAddons(CoreSysAttributes):
|
||||
"""Handle RESTful API for add-on functions."""
|
||||
|
||||
def _extract_addon(self, request: web.Request) -> Addon:
|
||||
"""Return addon, throw an exception it it doesn't exist."""
|
||||
def get_addon_for_request(self, request: web.Request) -> Addon:
|
||||
"""Return addon, throw an exception if it doesn't exist."""
|
||||
addon_slug: str = request.match_info.get("addon")
|
||||
|
||||
# Lookup itself
|
||||
|
@ -187,7 +191,7 @@ class APIAddons(CoreSysAttributes):
|
|||
|
||||
async def info(self, request: web.Request) -> dict[str, Any]:
|
||||
"""Return add-on information."""
|
||||
addon: AnyAddon = self._extract_addon(request)
|
||||
addon: AnyAddon = self.get_addon_for_request(request)
|
||||
|
||||
data = {
|
||||
ATTR_NAME: addon.name,
|
||||
|
@ -268,7 +272,7 @@ class APIAddons(CoreSysAttributes):
|
|||
@api_process
|
||||
async def options(self, request: web.Request) -> None:
|
||||
"""Store user options for add-on."""
|
||||
addon = self._extract_addon(request)
|
||||
addon = self.get_addon_for_request(request)
|
||||
|
||||
# Update secrets for validation
|
||||
await self.sys_homeassistant.secrets.reload()
|
||||
|
@ -303,7 +307,7 @@ class APIAddons(CoreSysAttributes):
|
|||
@api_process
|
||||
async def options_validate(self, request: web.Request) -> None:
|
||||
"""Validate user options for add-on."""
|
||||
addon = self._extract_addon(request)
|
||||
addon = self.get_addon_for_request(request)
|
||||
data = {ATTR_MESSAGE: "", ATTR_VALID: True, ATTR_PWNED: False}
|
||||
|
||||
options = await request.json(loads=json_loads) or addon.options
|
||||
|
@ -345,7 +349,7 @@ class APIAddons(CoreSysAttributes):
|
|||
slug: str = request.match_info.get("addon")
|
||||
if slug != "self":
|
||||
raise APIForbidden("This can be only read by the Add-on itself!")
|
||||
addon = self._extract_addon(request)
|
||||
addon = self.get_addon_for_request(request)
|
||||
|
||||
# Lookup/reload secrets
|
||||
await self.sys_homeassistant.secrets.reload()
|
||||
|
@ -357,7 +361,7 @@ class APIAddons(CoreSysAttributes):
|
|||
@api_process
|
||||
async def security(self, request: web.Request) -> None:
|
||||
"""Store security options for add-on."""
|
||||
addon = self._extract_addon(request)
|
||||
addon = self.get_addon_for_request(request)
|
||||
body: dict[str, Any] = await api_validate(SCHEMA_SECURITY, request)
|
||||
|
||||
if ATTR_PROTECTED in body:
|
||||
|
@ -369,7 +373,7 @@ class APIAddons(CoreSysAttributes):
|
|||
@api_process
|
||||
async def stats(self, request: web.Request) -> dict[str, Any]:
|
||||
"""Return resource information."""
|
||||
addon = self._extract_addon(request)
|
||||
addon = self.get_addon_for_request(request)
|
||||
|
||||
stats: DockerStats = await addon.stats()
|
||||
|
||||
|
@ -385,48 +389,47 @@ class APIAddons(CoreSysAttributes):
|
|||
}
|
||||
|
||||
@api_process
|
||||
def uninstall(self, request: web.Request) -> Awaitable[None]:
|
||||
async def uninstall(self, request: web.Request) -> Awaitable[None]:
|
||||
"""Uninstall add-on."""
|
||||
addon = self._extract_addon(request)
|
||||
return asyncio.shield(self.sys_addons.uninstall(addon.slug))
|
||||
addon = self.get_addon_for_request(request)
|
||||
body: dict[str, Any] = await api_validate(SCHEMA_UNINSTALL, request)
|
||||
return await asyncio.shield(
|
||||
self.sys_addons.uninstall(
|
||||
addon.slug, remove_config=body[ATTR_REMOVE_CONFIG]
|
||||
)
|
||||
)
|
||||
|
||||
@api_process
|
||||
async def start(self, request: web.Request) -> None:
|
||||
"""Start add-on."""
|
||||
addon = self._extract_addon(request)
|
||||
addon = self.get_addon_for_request(request)
|
||||
if start_task := await asyncio.shield(addon.start()):
|
||||
await start_task
|
||||
|
||||
@api_process
|
||||
def stop(self, request: web.Request) -> Awaitable[None]:
|
||||
"""Stop add-on."""
|
||||
addon = self._extract_addon(request)
|
||||
addon = self.get_addon_for_request(request)
|
||||
return asyncio.shield(addon.stop())
|
||||
|
||||
@api_process
|
||||
async def restart(self, request: web.Request) -> None:
|
||||
"""Restart add-on."""
|
||||
addon: Addon = self._extract_addon(request)
|
||||
addon: Addon = self.get_addon_for_request(request)
|
||||
if start_task := await asyncio.shield(addon.restart()):
|
||||
await start_task
|
||||
|
||||
@api_process
|
||||
async def rebuild(self, request: web.Request) -> None:
|
||||
"""Rebuild local build add-on."""
|
||||
addon = self._extract_addon(request)
|
||||
addon = self.get_addon_for_request(request)
|
||||
if start_task := await asyncio.shield(self.sys_addons.rebuild(addon.slug)):
|
||||
await start_task
|
||||
|
||||
@api_process_raw(CONTENT_TYPE_BINARY)
|
||||
def logs(self, request: web.Request) -> Awaitable[bytes]:
|
||||
"""Return logs from add-on."""
|
||||
addon = self._extract_addon(request)
|
||||
return addon.logs()
|
||||
|
||||
@api_process
|
||||
async def stdin(self, request: web.Request) -> None:
|
||||
"""Write to stdin of add-on."""
|
||||
addon = self._extract_addon(request)
|
||||
addon = self.get_addon_for_request(request)
|
||||
if not addon.with_stdin:
|
||||
raise APIError(f"STDIN not supported the {addon.slug} add-on")
|
||||
|
||||
|
|
|
@ -35,8 +35,7 @@ from ..coresys import CoreSysAttributes
|
|||
from ..exceptions import APIError
|
||||
from ..host.sound import StreamType
|
||||
from ..validate import version_tag
|
||||
from .const import CONTENT_TYPE_BINARY
|
||||
from .utils import api_process, api_process_raw, api_validate
|
||||
from .utils import api_process, api_validate
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -111,11 +110,6 @@ class APIAudio(CoreSysAttributes):
|
|||
raise APIError(f"Version {version} is already in use")
|
||||
await asyncio.shield(self.sys_plugins.audio.update(version))
|
||||
|
||||
@api_process_raw(CONTENT_TYPE_BINARY)
|
||||
def logs(self, request: web.Request) -> Awaitable[bytes]:
|
||||
"""Return Audio Docker logs."""
|
||||
return self.sys_plugins.audio.logs()
|
||||
|
||||
@api_process
|
||||
def restart(self, request: web.Request) -> Awaitable[None]:
|
||||
"""Restart Audio plugin."""
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
"""Init file for Supervisor auth/SSO RESTful API."""
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from aiohttp import BasicAuth, web
|
||||
from aiohttp.hdrs import AUTHORIZATION, CONTENT_TYPE, WWW_AUTHENTICATE
|
||||
|
@ -8,11 +9,19 @@ from aiohttp.web_exceptions import HTTPUnauthorized
|
|||
import voluptuous as vol
|
||||
|
||||
from ..addons.addon import Addon
|
||||
from ..const import ATTR_PASSWORD, ATTR_USERNAME, REQUEST_FROM
|
||||
from ..const import ATTR_NAME, ATTR_PASSWORD, ATTR_USERNAME, REQUEST_FROM
|
||||
from ..coresys import CoreSysAttributes
|
||||
from ..exceptions import APIForbidden
|
||||
from ..utils.json import json_loads
|
||||
from .const import CONTENT_TYPE_JSON, CONTENT_TYPE_URL
|
||||
from .const import (
|
||||
ATTR_GROUP_IDS,
|
||||
ATTR_IS_ACTIVE,
|
||||
ATTR_IS_OWNER,
|
||||
ATTR_LOCAL_ONLY,
|
||||
ATTR_USERS,
|
||||
CONTENT_TYPE_JSON,
|
||||
CONTENT_TYPE_URL,
|
||||
)
|
||||
from .utils import api_process, api_validate
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
@ -90,3 +99,21 @@ class APIAuth(CoreSysAttributes):
|
|||
async def cache(self, request: web.Request) -> None:
|
||||
"""Process cache reset request."""
|
||||
self.sys_auth.reset_data()
|
||||
|
||||
@api_process
|
||||
async def list_users(self, request: web.Request) -> dict[str, list[dict[str, Any]]]:
|
||||
"""List users on the Home Assistant instance."""
|
||||
return {
|
||||
ATTR_USERS: [
|
||||
{
|
||||
ATTR_USERNAME: user[ATTR_USERNAME],
|
||||
ATTR_NAME: user[ATTR_NAME],
|
||||
ATTR_IS_OWNER: user[ATTR_IS_OWNER],
|
||||
ATTR_IS_ACTIVE: user[ATTR_IS_ACTIVE],
|
||||
ATTR_LOCAL_ONLY: user[ATTR_LOCAL_ONLY],
|
||||
ATTR_GROUP_IDS: user[ATTR_GROUP_IDS],
|
||||
}
|
||||
for user in await self.sys_auth.list_users()
|
||||
if user[ATTR_USERNAME]
|
||||
]
|
||||
}
|
||||
|
|
|
@ -1,11 +1,14 @@
|
|||
"""Const for API."""
|
||||
|
||||
from enum import StrEnum
|
||||
|
||||
CONTENT_TYPE_BINARY = "application/octet-stream"
|
||||
CONTENT_TYPE_JSON = "application/json"
|
||||
CONTENT_TYPE_PNG = "image/png"
|
||||
CONTENT_TYPE_TAR = "application/tar"
|
||||
CONTENT_TYPE_TEXT = "text/plain"
|
||||
CONTENT_TYPE_URL = "application/x-www-form-urlencoded"
|
||||
CONTENT_TYPE_X_LOG = "text/x-log"
|
||||
|
||||
COOKIE_INGRESS = "ingress_session"
|
||||
|
||||
|
@ -14,6 +17,8 @@ ATTR_APPARMOR_VERSION = "apparmor_version"
|
|||
ATTR_ATTRIBUTES = "attributes"
|
||||
ATTR_AVAILABLE_UPDATES = "available_updates"
|
||||
ATTR_BACKGROUND = "background"
|
||||
ATTR_BOOT_SLOT = "boot_slot"
|
||||
ATTR_BOOT_SLOTS = "boot_slots"
|
||||
ATTR_BOOT_TIMESTAMP = "boot_timestamp"
|
||||
ATTR_BOOTS = "boots"
|
||||
ATTR_BROADCAST_LLMNR = "broadcast_llmnr"
|
||||
|
@ -31,26 +36,42 @@ ATTR_DT_UTC = "dt_utc"
|
|||
ATTR_EJECTABLE = "ejectable"
|
||||
ATTR_FALLBACK = "fallback"
|
||||
ATTR_FILESYSTEMS = "filesystems"
|
||||
ATTR_GROUP_IDS = "group_ids"
|
||||
ATTR_IDENTIFIERS = "identifiers"
|
||||
ATTR_IS_ACTIVE = "is_active"
|
||||
ATTR_IS_OWNER = "is_owner"
|
||||
ATTR_JOB_ID = "job_id"
|
||||
ATTR_JOBS = "jobs"
|
||||
ATTR_LLMNR = "llmnr"
|
||||
ATTR_LLMNR_HOSTNAME = "llmnr_hostname"
|
||||
ATTR_LOCAL_ONLY = "local_only"
|
||||
ATTR_MDNS = "mdns"
|
||||
ATTR_MODEL = "model"
|
||||
ATTR_MOUNTS = "mounts"
|
||||
ATTR_MOUNT_POINTS = "mount_points"
|
||||
ATTR_PANEL_PATH = "panel_path"
|
||||
ATTR_REMOVABLE = "removable"
|
||||
ATTR_REMOVE_CONFIG = "remove_config"
|
||||
ATTR_REVISION = "revision"
|
||||
ATTR_SAFE_MODE = "safe_mode"
|
||||
ATTR_SEAT = "seat"
|
||||
ATTR_SIGNED = "signed"
|
||||
ATTR_STARTUP_TIME = "startup_time"
|
||||
ATTR_STATUS = "status"
|
||||
ATTR_SUBSYSTEM = "subsystem"
|
||||
ATTR_SYSFS = "sysfs"
|
||||
ATTR_SYSTEM_HEALTH_LED = "system_health_led"
|
||||
ATTR_TIME_DETECTED = "time_detected"
|
||||
ATTR_UPDATE_TYPE = "update_type"
|
||||
ATTR_USE_NTP = "use_ntp"
|
||||
ATTR_USAGE = "usage"
|
||||
ATTR_USE_NTP = "use_ntp"
|
||||
ATTR_USERS = "users"
|
||||
ATTR_VENDOR = "vendor"
|
||||
ATTR_VIRTUALIZATION = "virtualization"
|
||||
|
||||
|
||||
class BootSlot(StrEnum):
|
||||
"""Boot slots used by HAOS."""
|
||||
|
||||
A = "A"
|
||||
B = "B"
|
||||
|
|
|
@ -15,7 +15,6 @@ from ..const import (
|
|||
AddonState,
|
||||
)
|
||||
from ..coresys import CoreSysAttributes
|
||||
from ..discovery.validate import valid_discovery_service
|
||||
from ..exceptions import APIError, APIForbidden
|
||||
from .utils import api_process, api_validate, require_home_assistant
|
||||
|
||||
|
@ -24,7 +23,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
|
|||
SCHEMA_DISCOVERY = vol.Schema(
|
||||
{
|
||||
vol.Required(ATTR_SERVICE): str,
|
||||
vol.Optional(ATTR_CONFIG): vol.Maybe(dict),
|
||||
vol.Required(ATTR_CONFIG): dict,
|
||||
}
|
||||
)
|
||||
|
||||
|
@ -71,15 +70,6 @@ class APIDiscovery(CoreSysAttributes):
|
|||
addon: Addon = request[REQUEST_FROM]
|
||||
service = body[ATTR_SERVICE]
|
||||
|
||||
try:
|
||||
valid_discovery_service(service)
|
||||
except vol.Invalid:
|
||||
_LOGGER.warning(
|
||||
"Received discovery message for unknown service %s from addon %s. Please report this to the maintainer of the add-on",
|
||||
service,
|
||||
addon.name,
|
||||
)
|
||||
|
||||
# Access?
|
||||
if body[ATTR_SERVICE] not in addon.discovery:
|
||||
_LOGGER.error(
|
||||
|
|
|
@ -26,8 +26,8 @@ from ..const import (
|
|||
from ..coresys import CoreSysAttributes
|
||||
from ..exceptions import APIError
|
||||
from ..validate import dns_server_list, version_tag
|
||||
from .const import ATTR_FALLBACK, ATTR_LLMNR, ATTR_MDNS, CONTENT_TYPE_BINARY
|
||||
from .utils import api_process, api_process_raw, api_validate
|
||||
from .const import ATTR_FALLBACK, ATTR_LLMNR, ATTR_MDNS
|
||||
from .utils import api_process, api_validate
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -105,11 +105,6 @@ class APICoreDNS(CoreSysAttributes):
|
|||
raise APIError(f"Version {version} is already in use")
|
||||
await asyncio.shield(self.sys_plugins.dns.update(version))
|
||||
|
||||
@api_process_raw(CONTENT_TYPE_BINARY)
|
||||
def logs(self, request: web.Request) -> Awaitable[bytes]:
|
||||
"""Return DNS Docker logs."""
|
||||
return self.sys_plugins.dns.logs()
|
||||
|
||||
@api_process
|
||||
def restart(self, request: web.Request) -> Awaitable[None]:
|
||||
"""Restart CoreDNS plugin."""
|
||||
|
|
|
@ -16,7 +16,7 @@ from ..const import (
|
|||
ATTR_SYSTEM,
|
||||
)
|
||||
from ..coresys import CoreSysAttributes
|
||||
from ..dbus.udisks2 import UDisks2
|
||||
from ..dbus.udisks2 import UDisks2Manager
|
||||
from ..dbus.udisks2.block import UDisks2Block
|
||||
from ..dbus.udisks2.drive import UDisks2Drive
|
||||
from ..hardware.data import Device
|
||||
|
@ -72,7 +72,7 @@ def filesystem_struct(fs_block: UDisks2Block) -> dict[str, Any]:
|
|||
}
|
||||
|
||||
|
||||
def drive_struct(udisks2: UDisks2, drive: UDisks2Drive) -> dict[str, Any]:
|
||||
def drive_struct(udisks2: UDisks2Manager, drive: UDisks2Drive) -> dict[str, Any]:
|
||||
"""Return a dict with information of a disk to be used in the API."""
|
||||
return {
|
||||
ATTR_VENDOR: drive.vendor,
|
||||
|
|
|
@ -36,8 +36,8 @@ from ..const import (
|
|||
from ..coresys import CoreSysAttributes
|
||||
from ..exceptions import APIError
|
||||
from ..validate import docker_image, network_port, version_tag
|
||||
from .const import CONTENT_TYPE_BINARY
|
||||
from .utils import api_process, api_process_raw, api_validate
|
||||
from .const import ATTR_SAFE_MODE
|
||||
from .utils import api_process, api_validate
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -63,6 +63,12 @@ SCHEMA_UPDATE = vol.Schema(
|
|||
}
|
||||
)
|
||||
|
||||
SCHEMA_RESTART = vol.Schema(
|
||||
{
|
||||
vol.Optional(ATTR_SAFE_MODE, default=False): vol.Boolean(),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class APIHomeAssistant(CoreSysAttributes):
|
||||
"""Handle RESTful API for Home Assistant functions."""
|
||||
|
@ -94,6 +100,9 @@ class APIHomeAssistant(CoreSysAttributes):
|
|||
|
||||
if ATTR_IMAGE in body:
|
||||
self.sys_homeassistant.image = body[ATTR_IMAGE]
|
||||
self.sys_homeassistant.override_image = (
|
||||
self.sys_homeassistant.image != self.sys_homeassistant.default_image
|
||||
)
|
||||
|
||||
if ATTR_BOOT in body:
|
||||
self.sys_homeassistant.boot = body[ATTR_BOOT]
|
||||
|
@ -164,20 +173,19 @@ class APIHomeAssistant(CoreSysAttributes):
|
|||
return asyncio.shield(self.sys_homeassistant.core.start())
|
||||
|
||||
@api_process
|
||||
def restart(self, request: web.Request) -> Awaitable[None]:
|
||||
async def restart(self, request: web.Request) -> None:
|
||||
"""Restart Home Assistant."""
|
||||
return asyncio.shield(self.sys_homeassistant.core.restart())
|
||||
body = await api_validate(SCHEMA_RESTART, request)
|
||||
|
||||
await asyncio.shield(
|
||||
self.sys_homeassistant.core.restart(safe_mode=body[ATTR_SAFE_MODE])
|
||||
)
|
||||
|
||||
@api_process
|
||||
def rebuild(self, request: web.Request) -> Awaitable[None]:
|
||||
"""Rebuild Home Assistant."""
|
||||
return asyncio.shield(self.sys_homeassistant.core.rebuild())
|
||||
|
||||
@api_process_raw(CONTENT_TYPE_BINARY)
|
||||
def logs(self, request: web.Request) -> Awaitable[bytes]:
|
||||
"""Return Home Assistant Docker logs."""
|
||||
return self.sys_homeassistant.core.logs()
|
||||
|
||||
@api_process
|
||||
async def check(self, request: web.Request) -> None:
|
||||
"""Check configuration of Home Assistant."""
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
"""Init file for Supervisor host RESTful API."""
|
||||
|
||||
import asyncio
|
||||
from contextlib import suppress
|
||||
import logging
|
||||
|
@ -28,7 +29,14 @@ from ..const import (
|
|||
)
|
||||
from ..coresys import CoreSysAttributes
|
||||
from ..exceptions import APIError, HostLogError
|
||||
from ..host.const import PARAM_BOOT_ID, PARAM_FOLLOW, PARAM_SYSLOG_IDENTIFIER
|
||||
from ..host.const import (
|
||||
PARAM_BOOT_ID,
|
||||
PARAM_FOLLOW,
|
||||
PARAM_SYSLOG_IDENTIFIER,
|
||||
LogFormat,
|
||||
LogFormatter,
|
||||
)
|
||||
from ..utils.systemd_journal import journal_logs_reader
|
||||
from .const import (
|
||||
ATTR_AGENT_VERSION,
|
||||
ATTR_APPARMOR_VERSION,
|
||||
|
@ -42,9 +50,11 @@ from .const import (
|
|||
ATTR_LLMNR_HOSTNAME,
|
||||
ATTR_STARTUP_TIME,
|
||||
ATTR_USE_NTP,
|
||||
ATTR_VIRTUALIZATION,
|
||||
CONTENT_TYPE_TEXT,
|
||||
CONTENT_TYPE_X_LOG,
|
||||
)
|
||||
from .utils import api_process, api_validate
|
||||
from .utils import api_process, api_process_raw, api_validate
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -65,6 +75,7 @@ class APIHost(CoreSysAttributes):
|
|||
ATTR_AGENT_VERSION: self.sys_dbus.agent.version,
|
||||
ATTR_APPARMOR_VERSION: self.sys_host.apparmor.version,
|
||||
ATTR_CHASSIS: self.sys_host.info.chassis,
|
||||
ATTR_VIRTUALIZATION: self.sys_host.info.virtualization,
|
||||
ATTR_CPE: self.sys_host.info.cpe,
|
||||
ATTR_DEPLOYMENT: self.sys_host.info.deployment,
|
||||
ATTR_DISK_FREE: self.sys_host.info.free_space,
|
||||
|
@ -153,11 +164,11 @@ class APIHost(CoreSysAttributes):
|
|||
raise APIError() from err
|
||||
return possible_offset
|
||||
|
||||
@api_process
|
||||
async def advanced_logs(
|
||||
async def advanced_logs_handler(
|
||||
self, request: web.Request, identifier: str | None = None, follow: bool = False
|
||||
) -> web.StreamResponse:
|
||||
"""Return systemd-journald logs."""
|
||||
log_formatter = LogFormatter.PLAIN
|
||||
params = {}
|
||||
if identifier:
|
||||
params[PARAM_SYSLOG_IDENTIFIER] = identifier
|
||||
|
@ -165,6 +176,8 @@ class APIHost(CoreSysAttributes):
|
|||
params[PARAM_SYSLOG_IDENTIFIER] = request.match_info.get(IDENTIFIER)
|
||||
else:
|
||||
params[PARAM_SYSLOG_IDENTIFIER] = self.sys_host.logs.default_identifiers
|
||||
# host logs should be always verbose, no matter what Accept header is used
|
||||
log_formatter = LogFormatter.VERBOSE
|
||||
|
||||
if BOOTID in request.match_info:
|
||||
params[PARAM_BOOT_ID] = await self._get_boot_id(
|
||||
|
@ -175,28 +188,40 @@ class APIHost(CoreSysAttributes):
|
|||
|
||||
if ACCEPT in request.headers and request.headers[ACCEPT] not in [
|
||||
CONTENT_TYPE_TEXT,
|
||||
CONTENT_TYPE_X_LOG,
|
||||
"*/*",
|
||||
]:
|
||||
raise APIError(
|
||||
"Invalid content type requested. Only text/plain supported for now."
|
||||
"Invalid content type requested. Only text/plain and text/x-log "
|
||||
"supported for now."
|
||||
)
|
||||
|
||||
if request.headers[ACCEPT] == CONTENT_TYPE_X_LOG:
|
||||
log_formatter = LogFormatter.VERBOSE
|
||||
|
||||
if RANGE in request.headers:
|
||||
range_header = request.headers.get(RANGE)
|
||||
else:
|
||||
range_header = f"entries=:-{DEFAULT_RANGE}:"
|
||||
|
||||
async with self.sys_host.logs.journald_logs(
|
||||
params=params, range_header=range_header
|
||||
params=params, range_header=range_header, accept=LogFormat.JOURNAL
|
||||
) as resp:
|
||||
try:
|
||||
response = web.StreamResponse()
|
||||
response.content_type = CONTENT_TYPE_TEXT
|
||||
await response.prepare(request)
|
||||
async for data in resp.content:
|
||||
await response.write(data)
|
||||
async for line in journal_logs_reader(resp, log_formatter):
|
||||
await response.write(line.encode("utf-8") + b"\n")
|
||||
except ConnectionResetError as ex:
|
||||
raise APIError(
|
||||
"Connection reset when trying to fetch data from systemd-journald."
|
||||
) from ex
|
||||
return response
|
||||
|
||||
@api_process_raw(CONTENT_TYPE_TEXT, error_type=CONTENT_TYPE_TEXT)
|
||||
async def advanced_logs(
|
||||
self, request: web.Request, identifier: str | None = None, follow: bool = False
|
||||
) -> web.StreamResponse:
|
||||
"""Return systemd-journald logs. Wrapped as standard API handler."""
|
||||
return await self.advanced_logs_handler(request, identifier, follow)
|
||||
|
|
|
@ -118,7 +118,7 @@ ADDONS_ROLE_ACCESS: dict[str, re.Pattern] = {
|
|||
r"|/multicast/.+"
|
||||
r"|/network/.+"
|
||||
r"|/observer/.+"
|
||||
r"|/os/.+"
|
||||
r"|/os/(?!datadisk/wipe).+"
|
||||
r"|/refresh_updates"
|
||||
r"|/resolution/.+"
|
||||
r"|/security/.+"
|
||||
|
|
|
@ -23,8 +23,7 @@ from ..const import (
|
|||
from ..coresys import CoreSysAttributes
|
||||
from ..exceptions import APIError
|
||||
from ..validate import version_tag
|
||||
from .const import CONTENT_TYPE_BINARY
|
||||
from .utils import api_process, api_process_raw, api_validate
|
||||
from .utils import api_process, api_validate
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -69,11 +68,6 @@ class APIMulticast(CoreSysAttributes):
|
|||
raise APIError(f"Version {version} is already in use")
|
||||
await asyncio.shield(self.sys_plugins.multicast.update(version))
|
||||
|
||||
@api_process_raw(CONTENT_TYPE_BINARY)
|
||||
def logs(self, request: web.Request) -> Awaitable[bytes]:
|
||||
"""Return Multicast Docker logs."""
|
||||
return self.sys_plugins.multicast.logs()
|
||||
|
||||
@api_process
|
||||
def restart(self, request: web.Request) -> Awaitable[None]:
|
||||
"""Restart Multicast plugin."""
|
||||
|
|
|
@ -19,6 +19,7 @@ from ..const import (
|
|||
ATTR_POWER_LED,
|
||||
ATTR_SERIAL,
|
||||
ATTR_SIZE,
|
||||
ATTR_STATE,
|
||||
ATTR_UPDATE_AVAILABLE,
|
||||
ATTR_VERSION,
|
||||
ATTR_VERSION_LATEST,
|
||||
|
@ -28,13 +29,17 @@ from ..exceptions import BoardInvalidError
|
|||
from ..resolution.const import ContextType, IssueType, SuggestionType
|
||||
from ..validate import version_tag
|
||||
from .const import (
|
||||
ATTR_BOOT_SLOT,
|
||||
ATTR_BOOT_SLOTS,
|
||||
ATTR_DATA_DISK,
|
||||
ATTR_DEV_PATH,
|
||||
ATTR_DEVICE,
|
||||
ATTR_DISKS,
|
||||
ATTR_MODEL,
|
||||
ATTR_STATUS,
|
||||
ATTR_SYSTEM_HEALTH_LED,
|
||||
ATTR_VENDOR,
|
||||
BootSlot,
|
||||
)
|
||||
from .utils import api_process, api_validate
|
||||
|
||||
|
@ -42,6 +47,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
|
|||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
|
||||
SCHEMA_SET_BOOT_SLOT = vol.Schema({vol.Required(ATTR_BOOT_SLOT): vol.Coerce(BootSlot)})
|
||||
SCHEMA_DISK = vol.Schema({vol.Required(ATTR_DEVICE): str})
|
||||
|
||||
SCHEMA_YELLOW_OPTIONS = vol.Schema(
|
||||
|
@ -74,6 +80,15 @@ class APIOS(CoreSysAttributes):
|
|||
ATTR_BOARD: self.sys_os.board,
|
||||
ATTR_BOOT: self.sys_dbus.rauc.boot_slot,
|
||||
ATTR_DATA_DISK: self.sys_os.datadisk.disk_used_id,
|
||||
ATTR_BOOT_SLOTS: {
|
||||
slot.bootname: {
|
||||
ATTR_STATE: slot.state,
|
||||
ATTR_STATUS: slot.boot_status,
|
||||
ATTR_VERSION: slot.bundle_version,
|
||||
}
|
||||
for slot in self.sys_os.slots
|
||||
if slot.bootname
|
||||
},
|
||||
}
|
||||
|
||||
@api_process
|
||||
|
@ -96,6 +111,17 @@ class APIOS(CoreSysAttributes):
|
|||
|
||||
await asyncio.shield(self.sys_os.datadisk.migrate_disk(body[ATTR_DEVICE]))
|
||||
|
||||
@api_process
|
||||
def wipe_data(self, request: web.Request) -> Awaitable[None]:
|
||||
"""Trigger data disk wipe on Host."""
|
||||
return asyncio.shield(self.sys_os.datadisk.wipe_disk())
|
||||
|
||||
@api_process
|
||||
async def set_boot_slot(self, request: web.Request) -> None:
|
||||
"""Change the active boot slot and reboot into it."""
|
||||
body = await api_validate(SCHEMA_SET_BOOT_SLOT, request)
|
||||
await asyncio.shield(self.sys_os.set_boot_slot(body[ATTR_BOOT_SLOT]))
|
||||
|
||||
@api_process
|
||||
async def list_data(self, request: web.Request) -> dict[str, Any]:
|
||||
"""Return possible data targets."""
|
||||
|
|
|
@ -186,6 +186,9 @@ class APIProxy(CoreSysAttributes):
|
|||
return await target.send_str(msg.data)
|
||||
if msg.type == WSMsgType.BINARY:
|
||||
return await target.send_bytes(msg.data)
|
||||
if msg.type == WSMsgType.CLOSE:
|
||||
_LOGGER.debug("Received close message from WebSocket.")
|
||||
return await target.close()
|
||||
|
||||
raise TypeError(
|
||||
f"Cannot proxy websocket message of unsupported type: {msg.type}"
|
||||
|
@ -200,6 +203,7 @@ class APIProxy(CoreSysAttributes):
|
|||
# init server
|
||||
server = web.WebSocketResponse(heartbeat=30)
|
||||
await server.prepare(request)
|
||||
addon_name = None
|
||||
|
||||
# handle authentication
|
||||
try:
|
||||
|
@ -223,7 +227,8 @@ class APIProxy(CoreSysAttributes):
|
|||
)
|
||||
return server
|
||||
|
||||
_LOGGER.info("WebSocket access from %s", addon.slug)
|
||||
addon_name = addon.slug
|
||||
_LOGGER.info("WebSocket access from %s", addon_name)
|
||||
|
||||
await server.send_json(
|
||||
{"type": "auth_ok", "ha_version": self.sys_homeassistant.version},
|
||||
|
@ -282,5 +287,5 @@ class APIProxy(CoreSysAttributes):
|
|||
if not server.closed:
|
||||
await server.close()
|
||||
|
||||
_LOGGER.info("Home Assistant WebSocket API connection is closed")
|
||||
_LOGGER.info("Home Assistant WebSocket API for %s closed", addon_name)
|
||||
return server
|
||||
|
|
|
@ -251,7 +251,7 @@ class APIStore(CoreSysAttributes):
|
|||
"""Return changelog from add-on."""
|
||||
addon = self._extract_addon(request)
|
||||
if not addon.with_changelog:
|
||||
raise APIError(f"No changelog found for add-on {addon.slug}!")
|
||||
return f"No changelog found for add-on {addon.slug}!"
|
||||
|
||||
with addon.path_changelog.open("r") as changelog:
|
||||
return changelog.read()
|
||||
|
|
|
@ -49,7 +49,7 @@ from ..store.validate import repositories
|
|||
from ..utils.sentry import close_sentry, init_sentry
|
||||
from ..utils.validate import validate_timezone
|
||||
from ..validate import version_tag, wait_boot
|
||||
from .const import CONTENT_TYPE_BINARY
|
||||
from .const import CONTENT_TYPE_TEXT
|
||||
from .utils import api_process, api_process_raw, api_validate
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
@ -229,7 +229,7 @@ class APISupervisor(CoreSysAttributes):
|
|||
"""Soft restart Supervisor."""
|
||||
return asyncio.shield(self.sys_supervisor.restart())
|
||||
|
||||
@api_process_raw(CONTENT_TYPE_BINARY)
|
||||
@api_process_raw(CONTENT_TYPE_TEXT, error_type=CONTENT_TYPE_TEXT)
|
||||
def logs(self, request: web.Request) -> Awaitable[bytes]:
|
||||
"""Return supervisor Docker logs."""
|
||||
return self.sys_supervisor.logs()
|
||||
|
|
|
@ -25,7 +25,7 @@ from ..exceptions import APIError, APIForbidden, DockerAPIError, HassioError
|
|||
from ..utils import check_exception_chain, get_message_from_exception_chain
|
||||
from ..utils.json import json_dumps, json_loads as json_loads_util
|
||||
from ..utils.log_format import format_message
|
||||
from .const import CONTENT_TYPE_BINARY
|
||||
from . import const
|
||||
|
||||
|
||||
def excract_supervisor_token(request: web.Request) -> str | None:
|
||||
|
@ -91,7 +91,7 @@ def require_home_assistant(method):
|
|||
return wrap_api
|
||||
|
||||
|
||||
def api_process_raw(content):
|
||||
def api_process_raw(content, *, error_type=None):
|
||||
"""Wrap content_type into function."""
|
||||
|
||||
def wrap_method(method):
|
||||
|
@ -101,15 +101,15 @@ def api_process_raw(content):
|
|||
"""Return api information."""
|
||||
try:
|
||||
msg_data = await method(api, *args, **kwargs)
|
||||
msg_type = content
|
||||
except (APIError, APIForbidden) as err:
|
||||
msg_data = str(err).encode()
|
||||
msg_type = CONTENT_TYPE_BINARY
|
||||
except HassioError:
|
||||
msg_data = b""
|
||||
msg_type = CONTENT_TYPE_BINARY
|
||||
except HassioError as err:
|
||||
return api_return_error(
|
||||
err, error_type=error_type or const.CONTENT_TYPE_BINARY
|
||||
)
|
||||
|
||||
return web.Response(body=msg_data, content_type=msg_type)
|
||||
if isinstance(msg_data, (web.Response, web.StreamResponse)):
|
||||
return msg_data
|
||||
|
||||
return web.Response(body=msg_data, content_type=content)
|
||||
|
||||
return wrap_api
|
||||
|
||||
|
@ -117,24 +117,40 @@ def api_process_raw(content):
|
|||
|
||||
|
||||
def api_return_error(
|
||||
error: Exception | None = None, message: str | None = None
|
||||
error: Exception | None = None,
|
||||
message: str | None = None,
|
||||
error_type: str | None = None,
|
||||
) -> web.Response:
|
||||
"""Return an API error message."""
|
||||
if error and not message:
|
||||
message = get_message_from_exception_chain(error)
|
||||
if check_exception_chain(error, DockerAPIError):
|
||||
message = format_message(message)
|
||||
if not message:
|
||||
message = "Unknown error, see supervisor"
|
||||
|
||||
result = {
|
||||
JSON_RESULT: RESULT_ERROR,
|
||||
JSON_MESSAGE: message or "Unknown error, see supervisor",
|
||||
}
|
||||
if isinstance(error, APIError) and error.job_id:
|
||||
result[JSON_JOB_ID] = error.job_id
|
||||
status = 400
|
||||
if is_api_error := isinstance(error, APIError):
|
||||
status = error.status
|
||||
|
||||
match error_type:
|
||||
case const.CONTENT_TYPE_TEXT:
|
||||
return web.Response(body=message, content_type=error_type, status=status)
|
||||
case const.CONTENT_TYPE_BINARY:
|
||||
return web.Response(
|
||||
body=message.encode(), content_type=error_type, status=status
|
||||
)
|
||||
case _:
|
||||
result = {
|
||||
JSON_RESULT: RESULT_ERROR,
|
||||
JSON_MESSAGE: message,
|
||||
}
|
||||
if is_api_error and error.job_id:
|
||||
result[JSON_JOB_ID] = error.job_id
|
||||
|
||||
return web.json_response(
|
||||
result,
|
||||
status=400,
|
||||
status=status,
|
||||
dumps=json_dumps,
|
||||
)
|
||||
|
||||
|
|
|
@ -2,11 +2,18 @@
|
|||
import asyncio
|
||||
import hashlib
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from .addons.addon import Addon
|
||||
from .const import ATTR_ADDON, ATTR_PASSWORD, ATTR_USERNAME, FILE_HASSIO_AUTH
|
||||
from .const import ATTR_ADDON, ATTR_PASSWORD, ATTR_TYPE, ATTR_USERNAME, FILE_HASSIO_AUTH
|
||||
from .coresys import CoreSys, CoreSysAttributes
|
||||
from .exceptions import AuthError, AuthPasswordResetError, HomeAssistantAPIError
|
||||
from .exceptions import (
|
||||
AuthError,
|
||||
AuthListUsersError,
|
||||
AuthPasswordResetError,
|
||||
HomeAssistantAPIError,
|
||||
HomeAssistantWSError,
|
||||
)
|
||||
from .utils.common import FileConfiguration
|
||||
from .validate import SCHEMA_AUTH_CONFIG
|
||||
|
||||
|
@ -132,6 +139,17 @@ class Auth(FileConfiguration, CoreSysAttributes):
|
|||
|
||||
raise AuthPasswordResetError()
|
||||
|
||||
async def list_users(self) -> list[dict[str, Any]]:
|
||||
"""List users on the Home Assistant instance."""
|
||||
try:
|
||||
return await self.sys_homeassistant.websocket.async_send_command(
|
||||
{ATTR_TYPE: "config/auth/list"}
|
||||
)
|
||||
except HomeAssistantWSError:
|
||||
_LOGGER.error("Can't request listing users on Home Assistant!")
|
||||
|
||||
raise AuthListUsersError()
|
||||
|
||||
@staticmethod
|
||||
def _rehash(value: str, salt2: str = "") -> str:
|
||||
"""Rehash a value."""
|
||||
|
|
|
@ -256,9 +256,11 @@ def migrate_system_env(coresys: CoreSys) -> None:
|
|||
def initialize_logging() -> None:
|
||||
"""Initialize the logging."""
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
fmt = "%(asctime)s %(levelname)s (%(threadName)s) [%(name)s] %(message)s"
|
||||
fmt = (
|
||||
"%(asctime)s.%(msecs)03d %(levelname)s (%(threadName)s) [%(name)s] %(message)s"
|
||||
)
|
||||
colorfmt = f"%(log_color)s{fmt}%(reset)s"
|
||||
datefmt = "%y-%m-%d %H:%M:%S"
|
||||
datefmt = "%Y-%m-%d %H:%M:%S"
|
||||
|
||||
# suppress overly verbose logs from libraries that aren't helpful
|
||||
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
|
||||
|
|
|
@ -12,6 +12,6 @@ class System(DBusInterface):
|
|||
object_path: str = DBUS_OBJECT_HAOS_SYSTEM
|
||||
|
||||
@dbus_connected
|
||||
async def schedule_wipe_device(self) -> None:
|
||||
async def schedule_wipe_device(self) -> bool:
|
||||
"""Schedule a factory reset on next system boot."""
|
||||
await self.dbus.System.call_schedule_wipe_device()
|
||||
return await self.dbus.System.call_schedule_wipe_device()
|
||||
|
|
|
@ -61,7 +61,8 @@ DBUS_OBJECT_RESOLVED = "/org/freedesktop/resolve1"
|
|||
DBUS_OBJECT_SETTINGS = "/org/freedesktop/NetworkManager/Settings"
|
||||
DBUS_OBJECT_SYSTEMD = "/org/freedesktop/systemd1"
|
||||
DBUS_OBJECT_TIMEDATE = "/org/freedesktop/timedate1"
|
||||
DBUS_OBJECT_UDISKS2 = "/org/freedesktop/UDisks2/Manager"
|
||||
DBUS_OBJECT_UDISKS2 = "/org/freedesktop/UDisks2"
|
||||
DBUS_OBJECT_UDISKS2_MANAGER = "/org/freedesktop/UDisks2/Manager"
|
||||
|
||||
DBUS_ATTR_ACTIVE_ACCESSPOINT = "ActiveAccessPoint"
|
||||
DBUS_ATTR_ACTIVE_CONNECTION = "ActiveConnection"
|
||||
|
@ -180,6 +181,7 @@ DBUS_ATTR_UUID = "Uuid"
|
|||
DBUS_ATTR_VARIANT = "Variant"
|
||||
DBUS_ATTR_VENDOR = "Vendor"
|
||||
DBUS_ATTR_VERSION = "Version"
|
||||
DBUS_ATTR_VIRTUALIZATION = "Virtualization"
|
||||
DBUS_ATTR_WHAT = "What"
|
||||
DBUS_ATTR_WWN = "WWN"
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ from .rauc import Rauc
|
|||
from .resolved import Resolved
|
||||
from .systemd import Systemd
|
||||
from .timedate import TimeDate
|
||||
from .udisks2 import UDisks2
|
||||
from .udisks2 import UDisks2Manager
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -37,7 +37,7 @@ class DBusManager(CoreSysAttributes):
|
|||
self._agent: OSAgent = OSAgent()
|
||||
self._timedate: TimeDate = TimeDate()
|
||||
self._resolved: Resolved = Resolved()
|
||||
self._udisks2: UDisks2 = UDisks2()
|
||||
self._udisks2: UDisks2Manager = UDisks2Manager()
|
||||
self._bus: MessageBus | None = None
|
||||
|
||||
@property
|
||||
|
@ -81,7 +81,7 @@ class DBusManager(CoreSysAttributes):
|
|||
return self._resolved
|
||||
|
||||
@property
|
||||
def udisks2(self) -> UDisks2:
|
||||
def udisks2(self) -> UDisks2Manager:
|
||||
"""Return the udisks2 interface."""
|
||||
return self._udisks2
|
||||
|
||||
|
|
|
@ -37,8 +37,8 @@ def get_connection_from_interface(
|
|||
# Generate/Update ID/name
|
||||
if not name or not name.startswith("Supervisor"):
|
||||
name = f"Supervisor {interface.name}"
|
||||
if interface.type == InterfaceType.VLAN:
|
||||
name = f"{name}.{interface.vlan.id}"
|
||||
if interface.type == InterfaceType.VLAN:
|
||||
name = f"{name}.{interface.vlan.id}"
|
||||
|
||||
if interface.type == InterfaceType.ETHERNET:
|
||||
iftype = "802-3-ethernet"
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
"""D-Bus interface for rauc."""
|
||||
|
||||
from ctypes import c_uint32, c_uint64
|
||||
import logging
|
||||
from typing import Any
|
||||
from typing import Any, NotRequired, TypedDict
|
||||
|
||||
from dbus_fast.aio.message_bus import MessageBus
|
||||
|
||||
|
@ -23,6 +25,28 @@ from .utils import dbus_connected
|
|||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
SlotStatusDataType = TypedDict(
|
||||
"SlotStatusDataType",
|
||||
{
|
||||
"class": str,
|
||||
"type": str,
|
||||
"state": str,
|
||||
"device": str,
|
||||
"bundle.compatible": NotRequired[str],
|
||||
"sha256": NotRequired[str],
|
||||
"size": NotRequired[c_uint64],
|
||||
"installed.count": NotRequired[c_uint32],
|
||||
"bundle.version": NotRequired[str],
|
||||
"installed.timestamp": NotRequired[str],
|
||||
"status": NotRequired[str],
|
||||
"activated.count": NotRequired[c_uint32],
|
||||
"activated.timestamp": NotRequired[str],
|
||||
"boot-status": NotRequired[str],
|
||||
"bootname": NotRequired[str],
|
||||
"parent": NotRequired[str],
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
class Rauc(DBusInterfaceProxy):
|
||||
"""Handle D-Bus interface for rauc."""
|
||||
|
@ -83,7 +107,7 @@ class Rauc(DBusInterfaceProxy):
|
|||
await self.dbus.Installer.call_install(str(raucb_file))
|
||||
|
||||
@dbus_connected
|
||||
async def get_slot_status(self) -> list[tuple[str, dict[str, Any]]]:
|
||||
async def get_slot_status(self) -> list[tuple[str, SlotStatusDataType]]:
|
||||
"""Get slot status."""
|
||||
return await self.dbus.Installer.call_get_slot_status()
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ from .const import (
|
|||
DBUS_ATTR_KERNEL_TIMESTAMP_MONOTONIC,
|
||||
DBUS_ATTR_LOADER_TIMESTAMP_MONOTONIC,
|
||||
DBUS_ATTR_USERSPACE_TIMESTAMP_MONOTONIC,
|
||||
DBUS_ATTR_VIRTUALIZATION,
|
||||
DBUS_ERR_SYSTEMD_NO_SUCH_UNIT,
|
||||
DBUS_IFACE_SYSTEMD_MANAGER,
|
||||
DBUS_NAME_SYSTEMD,
|
||||
|
@ -114,6 +115,12 @@ class Systemd(DBusInterfaceProxy):
|
|||
"""Return the boot timestamp."""
|
||||
return self.properties[DBUS_ATTR_FINISH_TIMESTAMP]
|
||||
|
||||
@property
|
||||
@dbus_property
|
||||
def virtualization(self) -> str:
|
||||
"""Return virtualization hypervisor being used."""
|
||||
return self.properties[DBUS_ATTR_VIRTUALIZATION]
|
||||
|
||||
@dbus_connected
|
||||
async def reboot(self) -> None:
|
||||
"""Reboot host computer."""
|
||||
|
|
|
@ -15,12 +15,15 @@ from ...exceptions import (
|
|||
from ..const import (
|
||||
DBUS_ATTR_SUPPORTED_FILESYSTEMS,
|
||||
DBUS_ATTR_VERSION,
|
||||
DBUS_IFACE_BLOCK,
|
||||
DBUS_IFACE_DRIVE,
|
||||
DBUS_IFACE_UDISKS2_MANAGER,
|
||||
DBUS_NAME_UDISKS2,
|
||||
DBUS_OBJECT_BASE,
|
||||
DBUS_OBJECT_UDISKS2,
|
||||
DBUS_OBJECT_UDISKS2_MANAGER,
|
||||
)
|
||||
from ..interface import DBusInterfaceProxy, dbus_property
|
||||
from ..interface import DBusInterface, DBusInterfaceProxy, dbus_property
|
||||
from ..utils import dbus_connected
|
||||
from .block import UDisks2Block
|
||||
from .const import UDISKS2_DEFAULT_OPTIONS
|
||||
|
@ -30,7 +33,15 @@ from .drive import UDisks2Drive
|
|||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class UDisks2(DBusInterfaceProxy):
|
||||
class UDisks2(DBusInterface):
|
||||
"""Handle D-Bus interface for UDisks2 root object."""
|
||||
|
||||
name: str = DBUS_NAME_UDISKS2
|
||||
bus_name: str = DBUS_NAME_UDISKS2
|
||||
object_path: str = DBUS_OBJECT_UDISKS2
|
||||
|
||||
|
||||
class UDisks2Manager(DBusInterfaceProxy):
|
||||
"""Handle D-Bus interface for UDisks2.
|
||||
|
||||
http://storaged.org/doc/udisks2-api/latest/
|
||||
|
@ -38,22 +49,36 @@ class UDisks2(DBusInterfaceProxy):
|
|||
|
||||
name: str = DBUS_NAME_UDISKS2
|
||||
bus_name: str = DBUS_NAME_UDISKS2
|
||||
object_path: str = DBUS_OBJECT_UDISKS2
|
||||
object_path: str = DBUS_OBJECT_UDISKS2_MANAGER
|
||||
properties_interface: str = DBUS_IFACE_UDISKS2_MANAGER
|
||||
|
||||
_block_devices: dict[str, UDisks2Block] = {}
|
||||
_drives: dict[str, UDisks2Drive] = {}
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize object."""
|
||||
super().__init__()
|
||||
self.udisks2_object_manager = UDisks2()
|
||||
|
||||
async def connect(self, bus: MessageBus):
|
||||
"""Connect to D-Bus."""
|
||||
try:
|
||||
await super().connect(bus)
|
||||
await self.udisks2_object_manager.connect(bus)
|
||||
except DBusError:
|
||||
_LOGGER.warning("Can't connect to udisks2")
|
||||
except (DBusServiceUnkownError, DBusInterfaceError):
|
||||
_LOGGER.warning(
|
||||
"No udisks2 support on the host. Host control has been disabled."
|
||||
)
|
||||
else:
|
||||
# Register for signals on devices added/removed
|
||||
self.udisks2_object_manager.dbus.object_manager.on_interfaces_added(
|
||||
self._interfaces_added
|
||||
)
|
||||
self.udisks2_object_manager.dbus.object_manager.on_interfaces_removed(
|
||||
self._interfaces_removed
|
||||
)
|
||||
|
||||
@dbus_connected
|
||||
async def update(self, changed: dict[str, Any] | None = None) -> None:
|
||||
|
@ -161,11 +186,47 @@ class UDisks2(DBusInterfaceProxy):
|
|||
]
|
||||
)
|
||||
|
||||
async def _interfaces_added(
|
||||
self, object_path: str, properties: dict[str, dict[str, Any]]
|
||||
) -> None:
|
||||
"""Interfaces added to a UDisks2 object."""
|
||||
if object_path in self._block_devices:
|
||||
await self._block_devices[object_path].update()
|
||||
return
|
||||
if object_path in self._drives:
|
||||
await self._drives[object_path].update()
|
||||
return
|
||||
|
||||
if DBUS_IFACE_BLOCK in properties:
|
||||
self._block_devices[object_path] = await UDisks2Block.new(
|
||||
object_path, self.dbus.bus
|
||||
)
|
||||
return
|
||||
|
||||
if DBUS_IFACE_DRIVE in properties:
|
||||
self._drives[object_path] = await UDisks2Drive.new(
|
||||
object_path, self.dbus.bus
|
||||
)
|
||||
|
||||
async def _interfaces_removed(
|
||||
self, object_path: str, interfaces: list[str]
|
||||
) -> None:
|
||||
"""Interfaces removed from a UDisks2 object."""
|
||||
if object_path in self._block_devices and DBUS_IFACE_BLOCK in interfaces:
|
||||
self._block_devices[object_path].shutdown()
|
||||
del self._block_devices[object_path]
|
||||
return
|
||||
|
||||
if object_path in self._drives and DBUS_IFACE_DRIVE in interfaces:
|
||||
self._drives[object_path].shutdown()
|
||||
del self._drives[object_path]
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""Shutdown the object and disconnect from D-Bus.
|
||||
|
||||
This method is irreversible.
|
||||
"""
|
||||
self.udisks2_object_manager.shutdown()
|
||||
for block_device in self.block_devices:
|
||||
block_device.shutdown()
|
||||
for drive in self.drives:
|
||||
|
|
|
@ -7,14 +7,12 @@ from typing import TYPE_CHECKING, Any
|
|||
from uuid import UUID, uuid4
|
||||
|
||||
import attr
|
||||
import voluptuous as vol
|
||||
from voluptuous.humanize import humanize_error
|
||||
|
||||
from ..const import ATTR_CONFIG, ATTR_DISCOVERY, FILE_HASSIO_DISCOVERY
|
||||
from ..coresys import CoreSys, CoreSysAttributes
|
||||
from ..exceptions import DiscoveryError, HomeAssistantAPIError
|
||||
from ..exceptions import HomeAssistantAPIError
|
||||
from ..utils.common import FileConfiguration
|
||||
from .validate import SCHEMA_DISCOVERY_CONFIG, valid_discovery_config
|
||||
from .validate import SCHEMA_DISCOVERY_CONFIG
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ..addons.addon import Addon
|
||||
|
@ -75,12 +73,6 @@ class Discovery(CoreSysAttributes, FileConfiguration):
|
|||
|
||||
def send(self, addon: Addon, service: str, config: dict[str, Any]) -> Message:
|
||||
"""Send a discovery message to Home Assistant."""
|
||||
try:
|
||||
config = valid_discovery_config(service, config)
|
||||
except vol.Invalid as err:
|
||||
_LOGGER.error("Invalid discovery %s config", humanize_error(config, err))
|
||||
raise DiscoveryError() from err
|
||||
|
||||
# Create message
|
||||
message = Message(addon.slug, service, config)
|
||||
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
"""Discovery service modules."""
|
|
@ -1,9 +0,0 @@
|
|||
"""Discovery service for AdGuard."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PORT
|
||||
|
||||
SCHEMA = vol.Schema(
|
||||
{vol.Required(ATTR_HOST): str, vol.Required(ATTR_PORT): network_port}
|
||||
)
|
|
@ -1,9 +0,0 @@
|
|||
"""Discovery service for Almond."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PORT
|
||||
|
||||
SCHEMA = vol.Schema(
|
||||
{vol.Required(ATTR_HOST): str, vol.Required(ATTR_PORT): network_port}
|
||||
)
|
|
@ -1,14 +0,0 @@
|
|||
"""Discovery service for MQTT."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_API_KEY, ATTR_HOST, ATTR_PORT, ATTR_SERIAL
|
||||
|
||||
SCHEMA = vol.Schema(
|
||||
{
|
||||
vol.Required(ATTR_HOST): str,
|
||||
vol.Required(ATTR_PORT): network_port,
|
||||
vol.Required(ATTR_SERIAL): str,
|
||||
vol.Required(ATTR_API_KEY): str,
|
||||
}
|
||||
)
|
|
@ -1,9 +0,0 @@
|
|||
"""Discovery service for the ESPHome Dashboard."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PORT
|
||||
|
||||
SCHEMA = vol.Schema(
|
||||
{vol.Required(ATTR_HOST): str, vol.Required(ATTR_PORT): network_port}
|
||||
)
|
|
@ -1,16 +0,0 @@
|
|||
"""Discovery service for HomeMatic."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PORT
|
||||
|
||||
SCHEMA = vol.Schema(
|
||||
{
|
||||
str: vol.Schema(
|
||||
{
|
||||
vol.Required(ATTR_HOST): str,
|
||||
vol.Required(ATTR_PORT): network_port,
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
|
@ -1,13 +0,0 @@
|
|||
"""Discovery service for Matter Server."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PORT
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA = vol.Schema(
|
||||
{
|
||||
vol.Required(ATTR_HOST): str,
|
||||
vol.Required(ATTR_PORT): network_port,
|
||||
}
|
||||
)
|
|
@ -1,6 +0,0 @@
|
|||
"""Discovery service for motionEye."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ..const import ATTR_URL
|
||||
|
||||
SCHEMA = vol.Schema({vol.Required(ATTR_URL): str})
|
|
@ -1,26 +0,0 @@
|
|||
"""Discovery service for MQTT."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import (
|
||||
ATTR_HOST,
|
||||
ATTR_PASSWORD,
|
||||
ATTR_PORT,
|
||||
ATTR_PROTOCOL,
|
||||
ATTR_SSL,
|
||||
ATTR_USERNAME,
|
||||
)
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA = vol.Schema(
|
||||
{
|
||||
vol.Required(ATTR_HOST): str,
|
||||
vol.Required(ATTR_PORT): network_port,
|
||||
vol.Optional(ATTR_USERNAME): str,
|
||||
vol.Optional(ATTR_PASSWORD): str,
|
||||
vol.Optional(ATTR_SSL, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_PROTOCOL, default="3.1.1"): vol.All(
|
||||
str, vol.In(["3.1", "3.1.1"])
|
||||
),
|
||||
}
|
||||
)
|
|
@ -1,13 +0,0 @@
|
|||
"""Discovery service for OpenThread Border Router."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PORT
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA = vol.Schema(
|
||||
{
|
||||
vol.Required(ATTR_HOST): str,
|
||||
vol.Required(ATTR_PORT): network_port,
|
||||
}
|
||||
)
|
|
@ -1,15 +0,0 @@
|
|||
"""Discovery service for OpenZwave MQTT."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PASSWORD, ATTR_PORT, ATTR_USERNAME
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA = vol.Schema(
|
||||
{
|
||||
vol.Required(ATTR_HOST): str,
|
||||
vol.Required(ATTR_PORT): network_port,
|
||||
vol.Required(ATTR_USERNAME): str,
|
||||
vol.Required(ATTR_PASSWORD): str,
|
||||
}
|
||||
)
|
|
@ -1,9 +0,0 @@
|
|||
"""Discovery service for RTSPtoWebRTC."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PORT
|
||||
|
||||
SCHEMA = vol.Schema(
|
||||
{vol.Required(ATTR_HOST): str, vol.Required(ATTR_PORT): network_port}
|
||||
)
|
|
@ -1,9 +0,0 @@
|
|||
"""Discovery service for UniFi."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PORT
|
||||
|
||||
SCHEMA = vol.Schema(
|
||||
{vol.Required(ATTR_HOST): str, vol.Required(ATTR_PORT): network_port}
|
||||
)
|
|
@ -1,14 +0,0 @@
|
|||
"""Discovery service for VLC Telnet."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PASSWORD, ATTR_PORT
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA = vol.Schema(
|
||||
{
|
||||
vol.Required(ATTR_HOST): str,
|
||||
vol.Required(ATTR_PORT): network_port,
|
||||
vol.Required(ATTR_PASSWORD): str,
|
||||
}
|
||||
)
|
|
@ -1,25 +0,0 @@
|
|||
"""Discovery service for the Wyoming Protocol integration."""
|
||||
from typing import Any, cast
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import voluptuous as vol
|
||||
|
||||
from ..const import ATTR_URI
|
||||
|
||||
|
||||
def validate_uri(value: Any) -> str:
|
||||
"""Validate an Wyoming URI.
|
||||
|
||||
Currently accepts TCP URIs, can extended
|
||||
to accept UNIX sockets in the future.
|
||||
"""
|
||||
uri_value = str(value)
|
||||
|
||||
if urlparse(uri_value).scheme == "tcp":
|
||||
# pylint: disable-next=no-value-for-parameter
|
||||
return cast(str, vol.Schema(vol.Url())(uri_value))
|
||||
|
||||
raise vol.Invalid("invalid Wyoming Protocol URI")
|
||||
|
||||
|
||||
SCHEMA = vol.Schema({vol.Required(ATTR_URI): validate_uri})
|
|
@ -1,13 +0,0 @@
|
|||
"""Discovery service for Zwave JS."""
|
||||
import voluptuous as vol
|
||||
|
||||
from ...validate import network_port
|
||||
from ..const import ATTR_HOST, ATTR_PORT
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA = vol.Schema(
|
||||
{
|
||||
vol.Required(ATTR_HOST): str,
|
||||
vol.Required(ATTR_PORT): network_port,
|
||||
}
|
||||
)
|
|
@ -1,6 +1,4 @@
|
|||
"""Validate services schema."""
|
||||
from importlib import import_module
|
||||
from pathlib import Path
|
||||
|
||||
import voluptuous as vol
|
||||
|
||||
|
@ -8,25 +6,6 @@ from ..const import ATTR_ADDON, ATTR_CONFIG, ATTR_DISCOVERY, ATTR_SERVICE, ATTR_
|
|||
from ..utils.validate import schema_or
|
||||
from ..validate import uuid_match
|
||||
|
||||
|
||||
def valid_discovery_service(service):
|
||||
"""Validate service name."""
|
||||
service_file = Path(__file__).parent.joinpath(f"services/{service}.py")
|
||||
if not service_file.exists():
|
||||
raise vol.Invalid(f"Service {service} not found") from None
|
||||
return service
|
||||
|
||||
|
||||
def valid_discovery_config(service, config):
|
||||
"""Validate service name."""
|
||||
try:
|
||||
service_mod = import_module(f".services.{service}", "supervisor.discovery")
|
||||
except ImportError:
|
||||
raise vol.Invalid(f"Service {service} not found") from None
|
||||
|
||||
return service_mod.SCHEMA(config)
|
||||
|
||||
|
||||
SCHEMA_DISCOVERY = vol.Schema(
|
||||
[
|
||||
vol.Schema(
|
||||
|
|
|
@ -641,11 +641,11 @@ class DockerAddon(DockerInterface):
|
|||
) -> None:
|
||||
"""Pull Docker image or build it."""
|
||||
if need_build is None and self.addon.need_build or need_build:
|
||||
await self._build(version)
|
||||
await self._build(version, image)
|
||||
else:
|
||||
await super().install(version, image, latest, arch)
|
||||
|
||||
async def _build(self, version: AwesomeVersion) -> None:
|
||||
async def _build(self, version: AwesomeVersion, image: str | None = None) -> None:
|
||||
"""Build a Docker container."""
|
||||
build_env = AddonBuild(self.coresys, self.addon)
|
||||
if not build_env.is_valid:
|
||||
|
@ -657,7 +657,7 @@ class DockerAddon(DockerInterface):
|
|||
image, log = await self.sys_run_in_executor(
|
||||
self.sys_docker.images.build,
|
||||
use_config_proxy=False,
|
||||
**build_env.get_docker_args(version),
|
||||
**build_env.get_docker_args(version, image),
|
||||
)
|
||||
|
||||
_LOGGER.debug("Build %s:%s done: %s", self.image, version, log)
|
||||
|
|
|
@ -74,6 +74,7 @@ MOUNT_DBUS = Mount(
|
|||
type=MountType.BIND, source="/run/dbus", target="/run/dbus", read_only=True
|
||||
)
|
||||
MOUNT_DEV = Mount(type=MountType.BIND, source="/dev", target="/dev", read_only=True)
|
||||
MOUNT_DEV.setdefault("BindOptions", {})["ReadOnlyNonRecursive"] = True
|
||||
MOUNT_DOCKER = Mount(
|
||||
type=MountType.BIND,
|
||||
source="/run/docker.sock",
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
from collections.abc import Awaitable
|
||||
from ipaddress import IPv4Address
|
||||
import logging
|
||||
import re
|
||||
|
||||
from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
|
||||
from docker.types import Mount
|
||||
|
@ -28,6 +29,7 @@ from .interface import CommandReturn, DockerInterface
|
|||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
_VERIFY_TRUST: AwesomeVersion = AwesomeVersion("2021.5.0")
|
||||
_HASS_DOCKER_NAME: str = "homeassistant"
|
||||
ENV_S6_GRACETIME = re.compile(r"^S6_SERVICES_GRACETIME=([0-9]+)$")
|
||||
|
||||
|
||||
class DockerHomeAssistant(DockerInterface):
|
||||
|
@ -53,10 +55,15 @@ class DockerHomeAssistant(DockerInterface):
|
|||
@property
|
||||
def timeout(self) -> int:
|
||||
"""Return timeout for Docker actions."""
|
||||
# Synchronized with the homeassistant core container's S6_SERVICES_GRACETIME
|
||||
# to avoid killing Home Assistant Core, see
|
||||
# Use S6_SERVICES_GRACETIME to avoid killing Home Assistant Core, see
|
||||
# https://github.com/home-assistant/core/tree/dev/Dockerfile
|
||||
return 240 + 20
|
||||
if self.meta_config and "Env" in self.meta_config:
|
||||
for env in self.meta_config["Env"]:
|
||||
if match := ENV_S6_GRACETIME.match(env):
|
||||
return 20 + int(int(match.group(1)) / 1000)
|
||||
|
||||
# Fallback - as of 2024.3, S6 SERVICES_GRACETIME was set to 24000
|
||||
return 260
|
||||
|
||||
@property
|
||||
def ip_address(self) -> IPv4Address:
|
||||
|
|
|
@ -14,6 +14,7 @@ from awesomeversion import AwesomeVersion
|
|||
from awesomeversion.strategy import AwesomeVersionStrategy
|
||||
import docker
|
||||
from docker.models.containers import Container
|
||||
from docker.models.images import Image
|
||||
import requests
|
||||
|
||||
from ..const import (
|
||||
|
@ -438,6 +439,44 @@ class DockerInterface(JobGroup):
|
|||
)
|
||||
self._meta = None
|
||||
|
||||
@Job(
|
||||
name="docker_interface_check_image",
|
||||
limit=JobExecutionLimit.GROUP_ONCE,
|
||||
on_condition=DockerJobError,
|
||||
)
|
||||
async def check_image(
|
||||
self,
|
||||
version: AwesomeVersion,
|
||||
expected_image: str,
|
||||
expected_arch: CpuArch | None = None,
|
||||
) -> None:
|
||||
"""Check we have expected image with correct arch."""
|
||||
expected_arch = expected_arch or self.sys_arch.supervisor
|
||||
image_name = f"{expected_image}:{version!s}"
|
||||
if self.image == expected_image:
|
||||
try:
|
||||
image: Image = await self.sys_run_in_executor(
|
||||
self.sys_docker.images.get, image_name
|
||||
)
|
||||
except (docker.errors.DockerException, requests.RequestException) as err:
|
||||
raise DockerError(
|
||||
f"Could not get {image_name} for check due to: {err!s}",
|
||||
_LOGGER.error,
|
||||
) from err
|
||||
|
||||
image_arch = f"{image.attrs['Os']}/{image.attrs['Architecture']}"
|
||||
if "Variant" in image.attrs:
|
||||
image_arch = f"{image_arch}/{image.attrs['Variant']}"
|
||||
|
||||
# If we have an image and its the right arch, all set
|
||||
if MAP_ARCH[expected_arch] == image_arch:
|
||||
return
|
||||
|
||||
# We're missing the image we need. Stop and clean up what we have then pull the right one
|
||||
with suppress(DockerError):
|
||||
await self.remove()
|
||||
await self.install(version, expected_image, arch=expected_arch)
|
||||
|
||||
@Job(
|
||||
name="docker_interface_update",
|
||||
limit=JobExecutionLimit.GROUP_ONCE,
|
||||
|
|
|
@ -177,6 +177,11 @@ class DockerAPI:
|
|||
if dns:
|
||||
kwargs["dns"] = [str(self.network.dns)]
|
||||
kwargs["dns_search"] = [DNS_SUFFIX]
|
||||
# CoreDNS forward plug-in fails in ~6s, then fallback triggers.
|
||||
# However, the default timeout of glibc and musl is 5s. Increase
|
||||
# default timeout to make sure CoreDNS fallback is working
|
||||
# on first query.
|
||||
kwargs["dns_opt"] = ["timeout:10"]
|
||||
if hostname:
|
||||
kwargs["domainname"] = DNS_SUFFIX
|
||||
|
||||
|
|
|
@ -133,6 +133,14 @@ class HassOSDataDiskError(HassOSError):
|
|||
"""Issues with the DataDisk feature from HAOS."""
|
||||
|
||||
|
||||
class HassOSSlotNotFound(HassOSError):
|
||||
"""Could not find boot slot."""
|
||||
|
||||
|
||||
class HassOSSlotUpdateError(HassOSError):
|
||||
"""Error while updating a slot via rauc."""
|
||||
|
||||
|
||||
# All Plugins
|
||||
|
||||
|
||||
|
@ -267,6 +275,10 @@ class AuthPasswordResetError(HassioError):
|
|||
"""Auth error if password reset failed."""
|
||||
|
||||
|
||||
class AuthListUsersError(HassioError):
|
||||
"""Auth error if listing users failed."""
|
||||
|
||||
|
||||
# Host
|
||||
|
||||
|
||||
|
@ -304,6 +316,8 @@ class HostLogError(HostError):
|
|||
class APIError(HassioError, RuntimeError):
|
||||
"""API errors."""
|
||||
|
||||
status = 400
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
message: str | None = None,
|
||||
|
@ -318,6 +332,8 @@ class APIError(HassioError, RuntimeError):
|
|||
class APIForbidden(APIError):
|
||||
"""API forbidden error."""
|
||||
|
||||
status = 403
|
||||
|
||||
|
||||
class APIAddonNotInstalled(APIError):
|
||||
"""Not installed addon requested at addons API."""
|
||||
|
@ -493,6 +509,17 @@ class WhoamiConnectivityError(WhoamiError):
|
|||
"""Connectivity errors while using whoami."""
|
||||
|
||||
|
||||
# utils/systemd_journal
|
||||
|
||||
|
||||
class SystemdJournalError(HassioError):
|
||||
"""Error while processing systemd journal logs."""
|
||||
|
||||
|
||||
class MalformedBinaryEntryError(SystemdJournalError):
|
||||
"""Raised when binary entry in the journal isn't followed by a newline."""
|
||||
|
||||
|
||||
# docker/api
|
||||
|
||||
|
||||
|
|
|
@ -1,16 +1,19 @@
|
|||
"""Constants for homeassistant."""
|
||||
from datetime import timedelta
|
||||
from enum import StrEnum
|
||||
from pathlib import PurePath
|
||||
|
||||
from awesomeversion import AwesomeVersion
|
||||
|
||||
from ..const import CoreState
|
||||
|
||||
ATTR_OVERRIDE_IMAGE = "override_image"
|
||||
LANDINGPAGE: AwesomeVersion = AwesomeVersion("landingpage")
|
||||
WATCHDOG_RETRY_SECONDS = 10
|
||||
WATCHDOG_MAX_ATTEMPTS = 5
|
||||
WATCHDOG_THROTTLE_PERIOD = timedelta(minutes=30)
|
||||
WATCHDOG_THROTTLE_MAX_CALLS = 10
|
||||
SAFE_MODE_FILENAME = PurePath("safe-mode")
|
||||
|
||||
CLOSING_STATES = [
|
||||
CoreState.SHUTDOWN,
|
||||
|
|
|
@ -35,6 +35,7 @@ from ..utils import convert_to_ascii
|
|||
from ..utils.sentry import capture_exception
|
||||
from .const import (
|
||||
LANDINGPAGE,
|
||||
SAFE_MODE_FILENAME,
|
||||
WATCHDOG_MAX_ATTEMPTS,
|
||||
WATCHDOG_RETRY_SECONDS,
|
||||
WATCHDOG_THROTTLE_MAX_CALLS,
|
||||
|
@ -86,7 +87,16 @@ class HomeAssistantCore(JobGroup):
|
|||
await self.instance.get_latest_version()
|
||||
)
|
||||
|
||||
await self.instance.attach(version=self.sys_homeassistant.version)
|
||||
await self.instance.attach(
|
||||
version=self.sys_homeassistant.version, skip_state_event_if_down=True
|
||||
)
|
||||
|
||||
# Ensure we are using correct image for this system (unless user has overridden it)
|
||||
if not self.sys_homeassistant.override_image:
|
||||
await self.instance.check_image(
|
||||
self.sys_homeassistant.version, self.sys_homeassistant.default_image
|
||||
)
|
||||
self.sys_homeassistant.image = self.sys_homeassistant.default_image
|
||||
except DockerError:
|
||||
_LOGGER.info(
|
||||
"No Home Assistant Docker image %s found.", self.sys_homeassistant.image
|
||||
|
@ -115,7 +125,9 @@ class HomeAssistantCore(JobGroup):
|
|||
"""Install a landing page."""
|
||||
# Try to use a preinstalled landingpage
|
||||
try:
|
||||
await self.instance.attach(version=LANDINGPAGE)
|
||||
await self.instance.attach(
|
||||
version=LANDINGPAGE, skip_state_event_if_down=True
|
||||
)
|
||||
except DockerError:
|
||||
pass
|
||||
else:
|
||||
|
@ -351,8 +363,14 @@ class HomeAssistantCore(JobGroup):
|
|||
limit=JobExecutionLimit.GROUP_ONCE,
|
||||
on_condition=HomeAssistantJobError,
|
||||
)
|
||||
async def restart(self) -> None:
|
||||
async def restart(self, *, safe_mode: bool = False) -> None:
|
||||
"""Restart Home Assistant Docker."""
|
||||
# Create safe mode marker file if necessary
|
||||
if safe_mode:
|
||||
await self.sys_run_in_executor(
|
||||
(self.sys_config.path_homeassistant / SAFE_MODE_FILENAME).touch
|
||||
)
|
||||
|
||||
try:
|
||||
await self.instance.restart()
|
||||
except DockerError as err:
|
||||
|
|
|
@ -48,7 +48,7 @@ from ..utils import remove_folder
|
|||
from ..utils.common import FileConfiguration
|
||||
from ..utils.json import read_json_file, write_json_file
|
||||
from .api import HomeAssistantAPI
|
||||
from .const import WSType
|
||||
from .const import ATTR_OVERRIDE_IMAGE, LANDINGPAGE, WSType
|
||||
from .core import HomeAssistantCore
|
||||
from .secrets import HomeAssistantSecrets
|
||||
from .validate import SCHEMA_HASS_CONFIG
|
||||
|
@ -170,18 +170,33 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
|
|||
"""Return last available version of Home Assistant."""
|
||||
return self.sys_updater.version_homeassistant
|
||||
|
||||
@property
|
||||
def default_image(self) -> str:
|
||||
"""Return the default image for this system."""
|
||||
return f"ghcr.io/home-assistant/{self.sys_machine}-homeassistant"
|
||||
|
||||
@property
|
||||
def image(self) -> str:
|
||||
"""Return image name of the Home Assistant container."""
|
||||
if self._data.get(ATTR_IMAGE):
|
||||
return self._data[ATTR_IMAGE]
|
||||
return f"ghcr.io/home-assistant/{self.sys_machine}-homeassistant"
|
||||
return self.default_image
|
||||
|
||||
@image.setter
|
||||
def image(self, value: str | None) -> None:
|
||||
"""Set image name of Home Assistant container."""
|
||||
self._data[ATTR_IMAGE] = value
|
||||
|
||||
@property
|
||||
def override_image(self) -> bool:
|
||||
"""Return if user has overridden the image to use for Home Assistant."""
|
||||
return self._data[ATTR_OVERRIDE_IMAGE]
|
||||
|
||||
@override_image.setter
|
||||
def override_image(self, value: bool) -> None:
|
||||
"""Enable/disable image override."""
|
||||
self._data[ATTR_OVERRIDE_IMAGE] = value
|
||||
|
||||
@property
|
||||
def version(self) -> AwesomeVersion | None:
|
||||
"""Return version of local version."""
|
||||
|
@ -313,6 +328,7 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
|
|||
if (
|
||||
not self.sys_hardware.policy.is_match_cgroup(PolicyGroup.UART, device)
|
||||
or not self.version
|
||||
or self.version == LANDINGPAGE
|
||||
or self.version < "2021.9.0"
|
||||
):
|
||||
return
|
||||
|
|
|
@ -18,6 +18,7 @@ from ..const import (
|
|||
ATTR_WATCHDOG,
|
||||
)
|
||||
from ..validate import docker_image, network_port, token, uuid_match, version_tag
|
||||
from .const import ATTR_OVERRIDE_IMAGE
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA_HASS_CONFIG = vol.Schema(
|
||||
|
@ -34,6 +35,7 @@ SCHEMA_HASS_CONFIG = vol.Schema(
|
|||
vol.Optional(ATTR_AUDIO_OUTPUT, default=None): vol.Maybe(str),
|
||||
vol.Optional(ATTR_AUDIO_INPUT, default=None): vol.Maybe(str),
|
||||
vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_OVERRIDE_IMAGE, default=False): vol.Boolean(),
|
||||
},
|
||||
extra=vol.REMOVE_EXTRA,
|
||||
)
|
||||
|
|
|
@ -62,3 +62,10 @@ class LogFormat(StrEnum):
|
|||
JOURNAL = "application/vnd.fdo.journal"
|
||||
JSON = "application/json"
|
||||
TEXT = "text/plain"
|
||||
|
||||
|
||||
class LogFormatter(StrEnum):
|
||||
"""Log formatter."""
|
||||
|
||||
PLAIN = "plain"
|
||||
VERBOSE = "verbose"
|
||||
|
|
|
@ -129,6 +129,11 @@ class InfoCenter(CoreSysAttributes):
|
|||
self.coresys.config.path_supervisor
|
||||
)
|
||||
|
||||
@property
|
||||
def virtualization(self) -> str | None:
|
||||
"""Return virtualization hypervisor being used."""
|
||||
return self.sys_dbus.systemd.virtualization
|
||||
|
||||
async def get_dmesg(self) -> bytes:
|
||||
"""Return host dmesg output."""
|
||||
proc = await asyncio.create_subprocess_shell(
|
||||
|
|
|
@ -7,12 +7,18 @@ import logging
|
|||
from pathlib import Path
|
||||
|
||||
from aiohttp import ClientError, ClientSession, ClientTimeout
|
||||
from aiohttp.client_exceptions import UnixClientConnectorError
|
||||
from aiohttp.client_reqrep import ClientResponse
|
||||
from aiohttp.connector import UnixConnector
|
||||
from aiohttp.hdrs import ACCEPT, RANGE
|
||||
|
||||
from ..coresys import CoreSys, CoreSysAttributes
|
||||
from ..exceptions import ConfigurationFileError, HostLogError, HostNotSupportedError
|
||||
from ..exceptions import (
|
||||
ConfigurationFileError,
|
||||
HostLogError,
|
||||
HostNotSupportedError,
|
||||
HostServiceError,
|
||||
)
|
||||
from ..utils.json import read_json_file
|
||||
from .const import PARAM_BOOT_ID, PARAM_SYSLOG_IDENTIFIER, LogFormat
|
||||
|
||||
|
@ -138,16 +144,21 @@ class LogsControl(CoreSysAttributes):
|
|||
"No systemd-journal-gatewayd Unix socket available", _LOGGER.error
|
||||
)
|
||||
|
||||
async with ClientSession(
|
||||
connector=UnixConnector(path="/run/systemd-journal-gatewayd.sock")
|
||||
) as session:
|
||||
headers = {ACCEPT: accept}
|
||||
if range_header:
|
||||
headers[RANGE] = range_header
|
||||
async with session.get(
|
||||
f"http://localhost{path}",
|
||||
headers=headers,
|
||||
params=params or {},
|
||||
timeout=timeout,
|
||||
) as client_response:
|
||||
yield client_response
|
||||
try:
|
||||
async with ClientSession(
|
||||
connector=UnixConnector(path=str(SYSTEMD_JOURNAL_GATEWAYD_SOCKET))
|
||||
) as session:
|
||||
headers = {ACCEPT: accept}
|
||||
if range_header:
|
||||
headers[RANGE] = range_header
|
||||
async with session.get(
|
||||
f"http://localhost{path}",
|
||||
headers=headers,
|
||||
params=params or {},
|
||||
timeout=timeout,
|
||||
) as client_response:
|
||||
yield client_response
|
||||
except UnixClientConnectorError as ex:
|
||||
raise HostServiceError(
|
||||
"Unable to connect to systemd-journal-gatewayd", _LOGGER.error
|
||||
) from ex
|
||||
|
|
|
@ -127,6 +127,7 @@ class HostManager(CoreSysAttributes):
|
|||
async def reload(self):
|
||||
"""Reload host functions."""
|
||||
await self.info.update()
|
||||
await self.sys_os.reload()
|
||||
|
||||
if self.sys_dbus.systemd.is_connected:
|
||||
await self.services.update()
|
||||
|
|
|
@ -105,8 +105,9 @@ class Tasks(CoreSysAttributes):
|
|||
addon.version,
|
||||
addon.latest_version,
|
||||
)
|
||||
continue
|
||||
# Delay auto-updates for a day in case of issues
|
||||
if utcnow() + timedelta(days=1) > addon.latest_version_timestamp:
|
||||
if utcnow() < addon.latest_version_timestamp + timedelta(days=1):
|
||||
continue
|
||||
if not addon.test_update_schema():
|
||||
_LOGGER.warning(
|
||||
|
|
|
@ -150,15 +150,10 @@ class MountManager(FileConfiguration, CoreSysAttributes):
|
|||
*[mount.update() for mount in mounts], return_exceptions=True
|
||||
)
|
||||
|
||||
# Try to reload any newly failed mounts and report issues if failure persists
|
||||
new_failures = [
|
||||
mounts[i]
|
||||
for i in range(len(mounts))
|
||||
if results[i] is not True
|
||||
and mounts[i].failed_issue not in self.sys_resolution.issues
|
||||
]
|
||||
# Try to reload failed mounts and report issues if failure persists
|
||||
failures = [mounts[i] for i in range(len(mounts)) if results[i] is not True]
|
||||
await self._mount_errors_to_issues(
|
||||
new_failures, [mount.reload() for mount in new_failures]
|
||||
failures, [self.reload_mount(mount.name) for mount in failures]
|
||||
)
|
||||
|
||||
async def _mount_errors_to_issues(
|
||||
|
@ -170,6 +165,8 @@ class MountManager(FileConfiguration, CoreSysAttributes):
|
|||
for i in range(len(errors)): # pylint: disable=consider-using-enumerate
|
||||
if not errors[i]:
|
||||
continue
|
||||
if mounts[i].failed_issue in self.sys_resolution.issues:
|
||||
continue
|
||||
if not isinstance(errors[i], MountError):
|
||||
capture_exception(errors[i])
|
||||
|
||||
|
|
|
@ -342,20 +342,23 @@ class Mount(CoreSysAttributes, ABC):
|
|||
"Mount %s is not mounted, mounting instead of reloading", self.name
|
||||
)
|
||||
await self.mount()
|
||||
return
|
||||
except DBusError as err:
|
||||
raise MountError(
|
||||
f"Could not reload mount {self.name} due to: {err!s}", _LOGGER.error
|
||||
) from err
|
||||
else:
|
||||
if await self._update_unit():
|
||||
await self._update_state_await(not_state=UnitActiveState.ACTIVATING)
|
||||
|
||||
if await self._update_unit():
|
||||
await self._update_state_await(not_state=UnitActiveState.ACTIVATING)
|
||||
if not await self.is_mounted():
|
||||
raise MountActivationError(
|
||||
f"Reloading {self.name} did not succeed. Check host logs for errors from mount or systemd unit {self.unit_name} for details.",
|
||||
_LOGGER.error,
|
||||
)
|
||||
|
||||
if not await self.is_mounted():
|
||||
raise MountActivationError(
|
||||
f"Reloading {self.name} did not succeed. Check host logs for errors from mount or systemd unit {self.unit_name} for details.",
|
||||
_LOGGER.error,
|
||||
)
|
||||
# If it is mounted now, dismiss corresponding issue if present
|
||||
if self.failed_issue in self.sys_resolution.issues:
|
||||
self.sys_resolution.dismiss_issue(self.failed_issue)
|
||||
|
||||
|
||||
class NetworkMount(Mount, ABC):
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
"""Constants for OS."""
|
||||
|
||||
FILESYSTEM_LABEL_DATA_DISK = "hassos-data"
|
||||
FILESYSTEM_LABEL_DISABLED_DATA_DISK = "hassos-data-dis"
|
||||
FILESYSTEM_LABEL_OLD_DATA_DISK = "hassos-data-old"
|
||||
PARTITION_NAME_EXTERNAL_DATA_DISK = "hassos-data-external"
|
||||
PARTITION_NAME_OLD_EXTERNAL_DATA_DISK = "hassos-data-external-old"
|
||||
|
|
|
@ -1,14 +1,16 @@
|
|||
"""Home Assistant Operating-System DataDisk."""
|
||||
|
||||
import asyncio
|
||||
from contextlib import suppress
|
||||
from dataclasses import dataclass
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Final
|
||||
from typing import Any, Final
|
||||
|
||||
from awesomeversion import AwesomeVersion
|
||||
|
||||
from ..coresys import CoreSys, CoreSysAttributes
|
||||
from ..dbus.const import DBUS_ATTR_ID_LABEL, DBUS_IFACE_BLOCK
|
||||
from ..dbus.udisks2.block import UDisks2Block
|
||||
from ..dbus.udisks2.const import FormatType
|
||||
from ..dbus.udisks2.drive import UDisks2Drive
|
||||
|
@ -22,8 +24,12 @@ from ..exceptions import (
|
|||
)
|
||||
from ..jobs.const import JobCondition, JobExecutionLimit
|
||||
from ..jobs.decorator import Job
|
||||
from ..resolution.checks.disabled_data_disk import CheckDisabledDataDisk
|
||||
from ..resolution.checks.multiple_data_disks import CheckMultipleDataDisks
|
||||
from ..utils.sentry import capture_exception
|
||||
from .const import (
|
||||
FILESYSTEM_LABEL_DATA_DISK,
|
||||
FILESYSTEM_LABEL_DISABLED_DATA_DISK,
|
||||
PARTITION_NAME_EXTERNAL_DATA_DISK,
|
||||
PARTITION_NAME_OLD_EXTERNAL_DATA_DISK,
|
||||
)
|
||||
|
@ -123,9 +129,9 @@ class DataDisk(CoreSysAttributes):
|
|||
vendor="",
|
||||
model="",
|
||||
serial="",
|
||||
id=self.sys_dbus.agent.datadisk.current_device,
|
||||
id=self.sys_dbus.agent.datadisk.current_device.as_posix(),
|
||||
size=0,
|
||||
device_path=self.sys_dbus.agent.datadisk.current_device,
|
||||
device_path=self.sys_dbus.agent.datadisk.current_device.as_posix(),
|
||||
object_path="",
|
||||
device_object_path="",
|
||||
)
|
||||
|
@ -157,6 +163,16 @@ class DataDisk(CoreSysAttributes):
|
|||
|
||||
return available
|
||||
|
||||
@property
|
||||
def check_multiple_data_disks(self) -> CheckMultipleDataDisks:
|
||||
"""Resolution center check for multiple data disks."""
|
||||
return self.sys_resolution.check.get("multiple_data_disks")
|
||||
|
||||
@property
|
||||
def check_disabled_data_disk(self) -> CheckDisabledDataDisk:
|
||||
"""Resolution center check for disabled data disk."""
|
||||
return self.sys_resolution.check.get("disabled_data_disk")
|
||||
|
||||
def _get_block_devices_for_drive(self, drive: UDisks2Drive) -> list[UDisks2Block]:
|
||||
"""Get block devices for a drive."""
|
||||
return [
|
||||
|
@ -172,6 +188,14 @@ class DataDisk(CoreSysAttributes):
|
|||
if self.sys_dbus.agent.version >= AwesomeVersion("1.2.0"):
|
||||
await self.sys_dbus.agent.datadisk.reload_device()
|
||||
|
||||
# Register for signals on devices added/removed
|
||||
self.sys_dbus.udisks2.udisks2_object_manager.dbus.object_manager.on_interfaces_added(
|
||||
self._udisks2_interface_added
|
||||
)
|
||||
self.sys_dbus.udisks2.udisks2_object_manager.dbus.object_manager.on_interfaces_removed(
|
||||
self._udisks2_interface_removed
|
||||
)
|
||||
|
||||
@Job(
|
||||
name="data_disk_migrate",
|
||||
conditions=[JobCondition.HAOS, JobCondition.OS_AGENT, JobCondition.HEALTHY],
|
||||
|
@ -272,6 +296,35 @@ class DataDisk(CoreSysAttributes):
|
|||
_LOGGER.warning,
|
||||
) from err
|
||||
|
||||
@Job(
|
||||
name="data_disk_wipe",
|
||||
conditions=[JobCondition.HAOS, JobCondition.OS_AGENT, JobCondition.HEALTHY],
|
||||
limit=JobExecutionLimit.ONCE,
|
||||
on_condition=HassOSJobError,
|
||||
)
|
||||
async def wipe_disk(self) -> None:
|
||||
"""Wipe the current data disk."""
|
||||
_LOGGER.info("Scheduling wipe of data disk on next reboot")
|
||||
try:
|
||||
if not await self.sys_dbus.agent.system.schedule_wipe_device():
|
||||
raise HassOSDataDiskError(
|
||||
"Can't schedule wipe of data disk, check host logs for details",
|
||||
_LOGGER.error,
|
||||
)
|
||||
except DBusError as err:
|
||||
raise HassOSDataDiskError(
|
||||
f"Can't schedule wipe of data disk: {err!s}", _LOGGER.error
|
||||
) from err
|
||||
|
||||
_LOGGER.info("Rebooting the host to finish the wipe")
|
||||
try:
|
||||
await self.sys_host.control.reboot()
|
||||
except (HostError, DBusError) as err:
|
||||
raise HassOSError(
|
||||
f"Can't restart device to finish data disk wipe: {err!s}",
|
||||
_LOGGER.warning,
|
||||
) from err
|
||||
|
||||
async def _format_device_with_single_partition(
|
||||
self, new_disk: Disk
|
||||
) -> UDisks2Block:
|
||||
|
@ -319,3 +372,54 @@ class DataDisk(CoreSysAttributes):
|
|||
"New data partition prepared on device %s", partition_block.device
|
||||
)
|
||||
return partition_block
|
||||
|
||||
async def _udisks2_interface_added(
|
||||
self, _: str, properties: dict[str, dict[str, Any]]
|
||||
):
|
||||
"""If a data disk is added, trigger the resolution check."""
|
||||
if (
|
||||
DBUS_IFACE_BLOCK not in properties
|
||||
or DBUS_ATTR_ID_LABEL not in properties[DBUS_IFACE_BLOCK]
|
||||
):
|
||||
return
|
||||
|
||||
if (
|
||||
properties[DBUS_IFACE_BLOCK][DBUS_ATTR_ID_LABEL]
|
||||
== FILESYSTEM_LABEL_DATA_DISK
|
||||
):
|
||||
check = self.check_multiple_data_disks
|
||||
elif (
|
||||
properties[DBUS_IFACE_BLOCK][DBUS_ATTR_ID_LABEL]
|
||||
== FILESYSTEM_LABEL_DISABLED_DATA_DISK
|
||||
):
|
||||
check = self.check_disabled_data_disk
|
||||
else:
|
||||
return
|
||||
|
||||
# Delay briefly before running check to allow data updates to occur
|
||||
await asyncio.sleep(0.1)
|
||||
await check()
|
||||
|
||||
async def _udisks2_interface_removed(self, _: str, interfaces: list[str]):
|
||||
"""If affected by a data disk issue, re-check on removal of a block device."""
|
||||
if DBUS_IFACE_BLOCK not in interfaces:
|
||||
return
|
||||
|
||||
if any(
|
||||
issue.type == self.check_multiple_data_disks.issue
|
||||
and issue.context == self.check_multiple_data_disks.context
|
||||
for issue in self.sys_resolution.issues
|
||||
):
|
||||
check = self.check_multiple_data_disks
|
||||
elif any(
|
||||
issue.type == self.check_disabled_data_disk.issue
|
||||
and issue.context == self.check_disabled_data_disk.context
|
||||
for issue in self.sys_resolution.issues
|
||||
):
|
||||
check = self.check_disabled_data_disk
|
||||
else:
|
||||
return
|
||||
|
||||
# Delay briefly before running check to allow data updates to occur
|
||||
await asyncio.sleep(0.1)
|
||||
await check()
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
"""OS support on supervisor."""
|
||||
from collections.abc import Awaitable
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
import errno
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from pathlib import Path, PurePath
|
||||
|
||||
import aiohttp
|
||||
from awesomeversion import AwesomeVersion, AwesomeVersionException
|
||||
|
@ -10,16 +12,109 @@ from cpe import CPE
|
|||
|
||||
from ..coresys import CoreSys, CoreSysAttributes
|
||||
from ..dbus.agent.boards.const import BOARD_NAME_SUPERVISED
|
||||
from ..dbus.rauc import RaucState
|
||||
from ..exceptions import DBusError, HassOSJobError, HassOSUpdateError
|
||||
from ..dbus.rauc import RaucState, SlotStatusDataType
|
||||
from ..exceptions import (
|
||||
DBusError,
|
||||
HassOSJobError,
|
||||
HassOSSlotNotFound,
|
||||
HassOSSlotUpdateError,
|
||||
HassOSUpdateError,
|
||||
)
|
||||
from ..jobs.const import JobCondition, JobExecutionLimit
|
||||
from ..jobs.decorator import Job
|
||||
from ..resolution.const import UnhealthyReason
|
||||
from ..utils.sentry import capture_exception
|
||||
from .data_disk import DataDisk
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass(slots=True, frozen=True)
|
||||
class SlotStatus:
|
||||
"""Status of a slot."""
|
||||
|
||||
class_: str
|
||||
type_: str
|
||||
state: str
|
||||
device: PurePath
|
||||
bundle_compatible: str | None = None
|
||||
sha256: str | None = None
|
||||
size: int | None = None
|
||||
installed_count: int | None = None
|
||||
bundle_version: AwesomeVersion | None = None
|
||||
installed_timestamp: datetime | None = None
|
||||
status: str | None = None
|
||||
activated_count: int | None = None
|
||||
activated_timestamp: datetime | None = None
|
||||
boot_status: RaucState | None = None
|
||||
bootname: str | None = None
|
||||
parent: str | None = None
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: SlotStatusDataType) -> "SlotStatus":
|
||||
"""Create SlotStatus from dictionary."""
|
||||
return cls(
|
||||
class_=data["class"],
|
||||
type_=data["type"],
|
||||
state=data["state"],
|
||||
device=PurePath(data["device"]),
|
||||
bundle_compatible=data.get("bundle.compatible"),
|
||||
sha256=data.get("sha256"),
|
||||
size=data.get("size"),
|
||||
installed_count=data.get("installed.count"),
|
||||
bundle_version=AwesomeVersion(data["bundle.version"])
|
||||
if "bundle.version" in data
|
||||
else None,
|
||||
installed_timestamp=datetime.fromisoformat(data["installed.timestamp"])
|
||||
if "installed.timestamp" in data
|
||||
else None,
|
||||
status=data.get("status"),
|
||||
activated_count=data.get("activated.count"),
|
||||
activated_timestamp=datetime.fromisoformat(data["activated.timestamp"])
|
||||
if "activated.timestamp" in data
|
||||
else None,
|
||||
boot_status=data.get("boot-status"),
|
||||
bootname=data.get("bootname"),
|
||||
parent=data.get("parent"),
|
||||
)
|
||||
|
||||
def to_dict(self) -> SlotStatusDataType:
|
||||
"""Get dictionary representation."""
|
||||
out: SlotStatusDataType = {
|
||||
"class": self.class_,
|
||||
"type": self.type_,
|
||||
"state": self.state,
|
||||
"device": self.device.as_posix(),
|
||||
}
|
||||
|
||||
if self.bundle_compatible is not None:
|
||||
out["bundle.compatible"] = self.bundle_compatible
|
||||
if self.sha256 is not None:
|
||||
out["sha256"] = self.sha256
|
||||
if self.size is not None:
|
||||
out["size"] = self.size
|
||||
if self.installed_count is not None:
|
||||
out["installed.count"] = self.installed_count
|
||||
if self.bundle_version is not None:
|
||||
out["bundle.version"] = str(self.bundle_version)
|
||||
if self.installed_timestamp is not None:
|
||||
out["installed.timestamp"] = str(self.installed_timestamp)
|
||||
if self.status is not None:
|
||||
out["status"] = self.status
|
||||
if self.activated_count is not None:
|
||||
out["activated.count"] = self.activated_count
|
||||
if self.activated_timestamp:
|
||||
out["activated.timestamp"] = str(self.activated_timestamp)
|
||||
if self.boot_status:
|
||||
out["boot-status"] = self.boot_status
|
||||
if self.bootname is not None:
|
||||
out["bootname"] = self.bootname
|
||||
if self.parent is not None:
|
||||
out["parent"] = self.parent
|
||||
|
||||
return out
|
||||
|
||||
|
||||
class OSManager(CoreSysAttributes):
|
||||
"""OS interface inside supervisor."""
|
||||
|
||||
|
@ -31,6 +126,7 @@ class OSManager(CoreSysAttributes):
|
|||
self._version: AwesomeVersion | None = None
|
||||
self._board: str | None = None
|
||||
self._os_name: str | None = None
|
||||
self._slots: dict[str, SlotStatus] | None = None
|
||||
|
||||
@property
|
||||
def available(self) -> bool:
|
||||
|
@ -70,6 +166,20 @@ class OSManager(CoreSysAttributes):
|
|||
"""Return Operating-System datadisk."""
|
||||
return self._datadisk
|
||||
|
||||
@property
|
||||
def slots(self) -> list[SlotStatus]:
|
||||
"""Return status of slots."""
|
||||
if not self._slots:
|
||||
return []
|
||||
return list(self._slots.values())
|
||||
|
||||
def get_slot_name(self, boot_name: str) -> str:
|
||||
"""Get slot name from boot name."""
|
||||
for name, status in self._slots.items():
|
||||
if status.bootname == boot_name:
|
||||
return name
|
||||
raise HassOSSlotNotFound()
|
||||
|
||||
def _get_download_url(self, version: AwesomeVersion) -> str:
|
||||
raw_url = self.sys_updater.ota_url
|
||||
if raw_url is None:
|
||||
|
@ -128,6 +238,14 @@ class OSManager(CoreSysAttributes):
|
|||
f"Can't write OTA file: {err!s}", _LOGGER.error
|
||||
) from err
|
||||
|
||||
@Job(name="os_manager_reload", conditions=[JobCondition.HAOS], internal=True)
|
||||
async def reload(self) -> None:
|
||||
"""Update cache of slot statuses."""
|
||||
self._slots = {
|
||||
slot[0]: SlotStatus.from_dict(slot[1])
|
||||
for slot in await self.sys_dbus.rauc.get_slot_status()
|
||||
}
|
||||
|
||||
async def load(self) -> None:
|
||||
"""Load HassOS data."""
|
||||
try:
|
||||
|
@ -149,6 +267,7 @@ class OSManager(CoreSysAttributes):
|
|||
self._version = AwesomeVersion(cpe.get_version()[0])
|
||||
self._board = cpe.get_target_hardware()[0]
|
||||
self._os_name = cpe.get_product()[0]
|
||||
await self.reload()
|
||||
|
||||
await self.datadisk.load()
|
||||
|
||||
|
@ -239,3 +358,27 @@ class OSManager(CoreSysAttributes):
|
|||
_LOGGER.error("Can't mark booted partition as healthy!")
|
||||
else:
|
||||
_LOGGER.info("Rauc: %s - %s", self.sys_dbus.rauc.boot_slot, response[1])
|
||||
await self.reload()
|
||||
|
||||
@Job(
|
||||
name="os_manager_set_boot_slot",
|
||||
conditions=[JobCondition.HAOS],
|
||||
on_condition=HassOSJobError,
|
||||
internal=True,
|
||||
)
|
||||
async def set_boot_slot(self, boot_name: str) -> None:
|
||||
"""Set active boot slot."""
|
||||
try:
|
||||
response = await self.sys_dbus.rauc.mark(
|
||||
RaucState.ACTIVE, self.get_slot_name(boot_name)
|
||||
)
|
||||
except DBusError as err:
|
||||
capture_exception(err)
|
||||
raise HassOSSlotUpdateError(
|
||||
f"Can't mark {boot_name} as active!", _LOGGER.error
|
||||
) from err
|
||||
|
||||
_LOGGER.info("Rauc: %s - %s", self.sys_dbus.rauc.boot_slot, response[1])
|
||||
|
||||
_LOGGER.info("Rebooting into new boot slot now")
|
||||
await self.sys_host.control.reboot()
|
||||
|
|
|
@ -2,8 +2,6 @@
|
|||
|
||||
Code: https://github.com/home-assistant/plugin-audio
|
||||
"""
|
||||
import asyncio
|
||||
from contextlib import suppress
|
||||
import errno
|
||||
import logging
|
||||
from pathlib import Path, PurePath
|
||||
|
@ -72,6 +70,11 @@ class PluginAudio(PluginBase):
|
|||
"""Return Path to pulse audio config file."""
|
||||
return Path(self.sys_config.path_audio, "pulse_audio.json")
|
||||
|
||||
@property
|
||||
def default_image(self) -> str:
|
||||
"""Return default image for audio plugin."""
|
||||
return self.sys_updater.image_audio
|
||||
|
||||
@property
|
||||
def latest_version(self) -> AwesomeVersion | None:
|
||||
"""Return latest version of Audio."""
|
||||
|
@ -102,28 +105,6 @@ class PluginAudio(PluginBase):
|
|||
self.sys_resolution.unhealthy = UnhealthyReason.OSERROR_BAD_MESSAGE
|
||||
_LOGGER.error("Can't create default asound: %s", err)
|
||||
|
||||
async def install(self) -> None:
|
||||
"""Install Audio."""
|
||||
_LOGGER.info("Setup Audio plugin")
|
||||
while True:
|
||||
# read audio tag and install it
|
||||
if not self.latest_version:
|
||||
await self.sys_updater.reload()
|
||||
|
||||
if self.latest_version:
|
||||
with suppress(DockerError):
|
||||
await self.instance.install(
|
||||
self.latest_version, image=self.sys_updater.image_audio
|
||||
)
|
||||
break
|
||||
_LOGGER.warning("Error on installing Audio plugin, retrying in 30sec")
|
||||
await asyncio.sleep(30)
|
||||
|
||||
_LOGGER.info("Audio plugin now installed")
|
||||
self.version = self.instance.version
|
||||
self.image = self.sys_updater.image_audio
|
||||
self.save_data()
|
||||
|
||||
@Job(
|
||||
name="plugin_audio_update",
|
||||
conditions=PLUGIN_UPDATE_CONDITIONS,
|
||||
|
@ -131,29 +112,11 @@ class PluginAudio(PluginBase):
|
|||
)
|
||||
async def update(self, version: str | None = None) -> None:
|
||||
"""Update Audio plugin."""
|
||||
version = version or self.latest_version
|
||||
old_image = self.image
|
||||
|
||||
if version == self.version:
|
||||
_LOGGER.warning("Version %s is already installed for Audio", version)
|
||||
return
|
||||
|
||||
try:
|
||||
await self.instance.update(version, image=self.sys_updater.image_audio)
|
||||
await super().update(version)
|
||||
except DockerError as err:
|
||||
raise AudioUpdateError("Audio update failed", _LOGGER.error) from err
|
||||
|
||||
self.version = version
|
||||
self.image = self.sys_updater.image_audio
|
||||
self.save_data()
|
||||
|
||||
# Cleanup
|
||||
with suppress(DockerError):
|
||||
await self.instance.cleanup(old_image=old_image)
|
||||
|
||||
# Start Audio
|
||||
await self.start()
|
||||
|
||||
async def restart(self) -> None:
|
||||
"""Restart Audio plugin."""
|
||||
_LOGGER.info("Restarting Audio plugin")
|
||||
|
|
|
@ -36,12 +36,17 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
|
|||
"""Set current version of the plugin."""
|
||||
self._data[ATTR_VERSION] = value
|
||||
|
||||
@property
|
||||
def default_image(self) -> str:
|
||||
"""Return default image for plugin."""
|
||||
return f"ghcr.io/home-assistant/{self.sys_arch.supervisor}-hassio-{self.slug}"
|
||||
|
||||
@property
|
||||
def image(self) -> str:
|
||||
"""Return current image of plugin."""
|
||||
if self._data.get(ATTR_IMAGE):
|
||||
return self._data[ATTR_IMAGE]
|
||||
return f"ghcr.io/home-assistant/{self.sys_arch.supervisor}-hassio-{self.slug}"
|
||||
return self.default_image
|
||||
|
||||
@image.setter
|
||||
def image(self, value: str) -> None:
|
||||
|
@ -160,6 +165,8 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
|
|||
await self.instance.attach(
|
||||
version=self.version, skip_state_event_if_down=True
|
||||
)
|
||||
|
||||
await self.instance.check_image(self.version, self.default_image)
|
||||
except DockerError:
|
||||
_LOGGER.info(
|
||||
"No %s plugin Docker image %s found.", self.slug, self.instance.image
|
||||
|
@ -170,7 +177,7 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
|
|||
await self.install()
|
||||
else:
|
||||
self.version = self.instance.version
|
||||
self.image = self.instance.image
|
||||
self.image = self.default_image
|
||||
self.save_data()
|
||||
|
||||
# Run plugin
|
||||
|
@ -178,13 +185,52 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
|
|||
if not await self.instance.is_running():
|
||||
await self.start()
|
||||
|
||||
@abstractmethod
|
||||
async def install(self) -> None:
|
||||
"""Install system plugin."""
|
||||
_LOGGER.info("Setup %s plugin", self.slug)
|
||||
while True:
|
||||
# read plugin tag and install it
|
||||
if not self.latest_version:
|
||||
await self.sys_updater.reload()
|
||||
|
||||
if self.latest_version:
|
||||
with suppress(DockerError):
|
||||
await self.instance.install(
|
||||
self.latest_version, image=self.default_image
|
||||
)
|
||||
break
|
||||
_LOGGER.warning(
|
||||
"Error on installing %s plugin, retrying in 30sec", self.slug
|
||||
)
|
||||
await asyncio.sleep(30)
|
||||
|
||||
_LOGGER.info("%s plugin now installed", self.slug)
|
||||
self.version = self.instance.version
|
||||
self.image = self.default_image
|
||||
self.save_data()
|
||||
|
||||
@abstractmethod
|
||||
async def update(self, version: str | None = None) -> None:
|
||||
"""Update system plugin."""
|
||||
version = version or self.latest_version
|
||||
old_image = self.image
|
||||
|
||||
if version == self.version:
|
||||
_LOGGER.warning(
|
||||
"Version %s is already installed for %s", version, self.slug
|
||||
)
|
||||
return
|
||||
|
||||
await self.instance.update(version, image=self.default_image)
|
||||
self.version = self.instance.version
|
||||
self.image = self.default_image
|
||||
self.save_data()
|
||||
|
||||
# Cleanup
|
||||
with suppress(DockerError):
|
||||
await self.instance.cleanup(old_image=old_image)
|
||||
|
||||
# Start plugin
|
||||
await self.start()
|
||||
|
||||
@abstractmethod
|
||||
async def repair(self) -> None:
|
||||
|
|
|
@ -2,9 +2,7 @@
|
|||
|
||||
Code: https://github.com/home-assistant/plugin-cli
|
||||
"""
|
||||
import asyncio
|
||||
from collections.abc import Awaitable
|
||||
from contextlib import suppress
|
||||
import logging
|
||||
import secrets
|
||||
|
||||
|
@ -41,6 +39,11 @@ class PluginCli(PluginBase):
|
|||
self.coresys: CoreSys = coresys
|
||||
self.instance: DockerCli = DockerCli(coresys)
|
||||
|
||||
@property
|
||||
def default_image(self) -> str:
|
||||
"""Return default image for cli plugin."""
|
||||
return self.sys_updater.image_cli
|
||||
|
||||
@property
|
||||
def latest_version(self) -> AwesomeVersion | None:
|
||||
"""Return version of latest cli."""
|
||||
|
@ -51,29 +54,6 @@ class PluginCli(PluginBase):
|
|||
"""Return an access token for the Supervisor API."""
|
||||
return self._data.get(ATTR_ACCESS_TOKEN)
|
||||
|
||||
async def install(self) -> None:
|
||||
"""Install cli."""
|
||||
_LOGGER.info("Running setup for CLI plugin")
|
||||
while True:
|
||||
# read cli tag and install it
|
||||
if not self.latest_version:
|
||||
await self.sys_updater.reload()
|
||||
|
||||
if self.latest_version:
|
||||
with suppress(DockerError):
|
||||
await self.instance.install(
|
||||
self.latest_version,
|
||||
image=self.sys_updater.image_cli,
|
||||
)
|
||||
break
|
||||
_LOGGER.warning("Error on install cli plugin. Retrying in 30sec")
|
||||
await asyncio.sleep(30)
|
||||
|
||||
_LOGGER.info("CLI plugin is now installed")
|
||||
self.version = self.instance.version
|
||||
self.image = self.sys_updater.image_cli
|
||||
self.save_data()
|
||||
|
||||
@Job(
|
||||
name="plugin_cli_update",
|
||||
conditions=PLUGIN_UPDATE_CONDITIONS,
|
||||
|
@ -81,29 +61,11 @@ class PluginCli(PluginBase):
|
|||
)
|
||||
async def update(self, version: AwesomeVersion | None = None) -> None:
|
||||
"""Update local HA cli."""
|
||||
version = version or self.latest_version
|
||||
old_image = self.image
|
||||
|
||||
if version == self.version:
|
||||
_LOGGER.warning("Version %s is already installed for CLI", version)
|
||||
return
|
||||
|
||||
try:
|
||||
await self.instance.update(version, image=self.sys_updater.image_cli)
|
||||
await super().update(version)
|
||||
except DockerError as err:
|
||||
raise CliUpdateError("CLI update failed", _LOGGER.error) from err
|
||||
|
||||
self.version = version
|
||||
self.image = self.sys_updater.image_cli
|
||||
self.save_data()
|
||||
|
||||
# Cleanup
|
||||
with suppress(DockerError):
|
||||
await self.instance.cleanup(old_image=old_image)
|
||||
|
||||
# Start cli
|
||||
await self.start()
|
||||
|
||||
async def start(self) -> None:
|
||||
"""Run cli."""
|
||||
# Create new API token
|
||||
|
|
|
@ -108,6 +108,11 @@ class PluginDns(PluginBase):
|
|||
"""Return list of DNS servers."""
|
||||
self._data[ATTR_SERVERS] = value
|
||||
|
||||
@property
|
||||
def default_image(self) -> str:
|
||||
"""Return default image for dns plugin."""
|
||||
return self.sys_updater.image_dns
|
||||
|
||||
@property
|
||||
def latest_version(self) -> AwesomeVersion | None:
|
||||
"""Return latest version of CoreDNS."""
|
||||
|
@ -168,25 +173,7 @@ class PluginDns(PluginBase):
|
|||
|
||||
async def install(self) -> None:
|
||||
"""Install CoreDNS."""
|
||||
_LOGGER.info("Running setup for CoreDNS plugin")
|
||||
while True:
|
||||
# read homeassistant tag and install it
|
||||
if not self.latest_version:
|
||||
await self.sys_updater.reload()
|
||||
|
||||
if self.latest_version:
|
||||
with suppress(DockerError):
|
||||
await self.instance.install(
|
||||
self.latest_version, image=self.sys_updater.image_dns
|
||||
)
|
||||
break
|
||||
_LOGGER.warning("Error on install CoreDNS plugin. Retrying in 30sec")
|
||||
await asyncio.sleep(30)
|
||||
|
||||
_LOGGER.info("CoreDNS plugin now installed")
|
||||
self.version = self.instance.version
|
||||
self.image = self.sys_updater.image_dns
|
||||
self.save_data()
|
||||
await super().install()
|
||||
|
||||
# Init Hosts
|
||||
await self.write_hosts()
|
||||
|
@ -198,30 +185,11 @@ class PluginDns(PluginBase):
|
|||
)
|
||||
async def update(self, version: AwesomeVersion | None = None) -> None:
|
||||
"""Update CoreDNS plugin."""
|
||||
version = version or self.latest_version
|
||||
old_image = self.image
|
||||
|
||||
if version == self.version:
|
||||
_LOGGER.warning("Version %s is already installed for CoreDNS", version)
|
||||
return
|
||||
|
||||
# Update
|
||||
try:
|
||||
await self.instance.update(version, image=self.sys_updater.image_dns)
|
||||
await super().update(version)
|
||||
except DockerError as err:
|
||||
raise CoreDNSUpdateError("CoreDNS update failed", _LOGGER.error) from err
|
||||
|
||||
self.version = version
|
||||
self.image = self.sys_updater.image_dns
|
||||
self.save_data()
|
||||
|
||||
# Cleanup
|
||||
with suppress(DockerError):
|
||||
await self.instance.cleanup(old_image=old_image)
|
||||
|
||||
# Start CoreDNS
|
||||
await self.start()
|
||||
|
||||
async def restart(self) -> None:
|
||||
"""Restart CoreDNS plugin."""
|
||||
self._write_config()
|
||||
|
|
|
@ -2,8 +2,6 @@
|
|||
|
||||
Code: https://github.com/home-assistant/plugin-multicast
|
||||
"""
|
||||
import asyncio
|
||||
from contextlib import suppress
|
||||
import logging
|
||||
|
||||
from awesomeversion import AwesomeVersion
|
||||
|
@ -43,33 +41,16 @@ class PluginMulticast(PluginBase):
|
|||
self.coresys: CoreSys = coresys
|
||||
self.instance: DockerMulticast = DockerMulticast(coresys)
|
||||
|
||||
@property
|
||||
def default_image(self) -> str:
|
||||
"""Return default image for multicast plugin."""
|
||||
return self.sys_updater.image_multicast
|
||||
|
||||
@property
|
||||
def latest_version(self) -> AwesomeVersion | None:
|
||||
"""Return latest version of Multicast."""
|
||||
return self.sys_updater.version_multicast
|
||||
|
||||
async def install(self) -> None:
|
||||
"""Install Multicast."""
|
||||
_LOGGER.info("Running setup for Multicast plugin")
|
||||
while True:
|
||||
# read multicast tag and install it
|
||||
if not self.latest_version:
|
||||
await self.sys_updater.reload()
|
||||
|
||||
if self.latest_version:
|
||||
with suppress(DockerError):
|
||||
await self.instance.install(
|
||||
self.latest_version, image=self.sys_updater.image_multicast
|
||||
)
|
||||
break
|
||||
_LOGGER.warning("Error on install Multicast plugin. Retrying in 30sec")
|
||||
await asyncio.sleep(30)
|
||||
|
||||
_LOGGER.info("Multicast plugin is now installed")
|
||||
self.version = self.instance.version
|
||||
self.image = self.sys_updater.image_multicast
|
||||
self.save_data()
|
||||
|
||||
@Job(
|
||||
name="plugin_multicast_update",
|
||||
conditions=PLUGIN_UPDATE_CONDITIONS,
|
||||
|
@ -77,32 +58,13 @@ class PluginMulticast(PluginBase):
|
|||
)
|
||||
async def update(self, version: AwesomeVersion | None = None) -> None:
|
||||
"""Update Multicast plugin."""
|
||||
version = version or self.latest_version
|
||||
old_image = self.image
|
||||
|
||||
if version == self.version:
|
||||
_LOGGER.warning("Version %s is already installed for Multicast", version)
|
||||
return
|
||||
|
||||
# Update
|
||||
try:
|
||||
await self.instance.update(version, image=self.sys_updater.image_multicast)
|
||||
await super().update(version)
|
||||
except DockerError as err:
|
||||
raise MulticastUpdateError(
|
||||
"Multicast update failed", _LOGGER.error
|
||||
) from err
|
||||
|
||||
self.version = version
|
||||
self.image = self.sys_updater.image_multicast
|
||||
self.save_data()
|
||||
|
||||
# Cleanup
|
||||
with suppress(DockerError):
|
||||
await self.instance.cleanup(old_image=old_image)
|
||||
|
||||
# Start Multicast plugin
|
||||
await self.start()
|
||||
|
||||
async def restart(self) -> None:
|
||||
"""Restart Multicast plugin."""
|
||||
_LOGGER.info("Restarting Multicast plugin")
|
||||
|
|
|
@ -2,8 +2,6 @@
|
|||
|
||||
Code: https://github.com/home-assistant/plugin-observer
|
||||
"""
|
||||
import asyncio
|
||||
from contextlib import suppress
|
||||
import logging
|
||||
import secrets
|
||||
|
||||
|
@ -46,6 +44,11 @@ class PluginObserver(PluginBase):
|
|||
self.coresys: CoreSys = coresys
|
||||
self.instance: DockerObserver = DockerObserver(coresys)
|
||||
|
||||
@property
|
||||
def default_image(self) -> str:
|
||||
"""Return default image for observer plugin."""
|
||||
return self.sys_updater.image_observer
|
||||
|
||||
@property
|
||||
def latest_version(self) -> AwesomeVersion | None:
|
||||
"""Return version of latest observer."""
|
||||
|
@ -56,28 +59,6 @@ class PluginObserver(PluginBase):
|
|||
"""Return an access token for the Observer API."""
|
||||
return self._data.get(ATTR_ACCESS_TOKEN)
|
||||
|
||||
async def install(self) -> None:
|
||||
"""Install observer."""
|
||||
_LOGGER.info("Running setup for observer plugin")
|
||||
while True:
|
||||
# read observer tag and install it
|
||||
if not self.latest_version:
|
||||
await self.sys_updater.reload()
|
||||
|
||||
if self.latest_version:
|
||||
with suppress(DockerError):
|
||||
await self.instance.install(
|
||||
self.latest_version, image=self.sys_updater.image_observer
|
||||
)
|
||||
break
|
||||
_LOGGER.warning("Error on install observer plugin. Retrying in 30sec")
|
||||
await asyncio.sleep(30)
|
||||
|
||||
_LOGGER.info("observer plugin now installed")
|
||||
self.version = self.instance.version
|
||||
self.image = self.sys_updater.image_observer
|
||||
self.save_data()
|
||||
|
||||
@Job(
|
||||
name="plugin_observer_update",
|
||||
conditions=PLUGIN_UPDATE_CONDITIONS,
|
||||
|
@ -85,29 +66,12 @@ class PluginObserver(PluginBase):
|
|||
)
|
||||
async def update(self, version: AwesomeVersion | None = None) -> None:
|
||||
"""Update local HA observer."""
|
||||
version = version or self.latest_version
|
||||
old_image = self.image
|
||||
|
||||
if version == self.version:
|
||||
_LOGGER.warning("Version %s is already installed for observer", version)
|
||||
return
|
||||
|
||||
try:
|
||||
await self.instance.update(version, image=self.sys_updater.image_observer)
|
||||
await super().update(version)
|
||||
except DockerError as err:
|
||||
_LOGGER.error("HA observer update failed")
|
||||
raise ObserverUpdateError() from err
|
||||
|
||||
self.version = version
|
||||
self.image = self.sys_updater.image_observer
|
||||
self.save_data()
|
||||
|
||||
# Cleanup
|
||||
with suppress(DockerError):
|
||||
await self.instance.cleanup(old_image=old_image)
|
||||
|
||||
# Start observer
|
||||
await self.start()
|
||||
raise ObserverUpdateError(
|
||||
"HA observer update failed", _LOGGER.error
|
||||
) from err
|
||||
|
||||
async def start(self) -> None:
|
||||
"""Run observer."""
|
||||
|
|
|
@ -0,0 +1,63 @@
|
|||
"""Helpers to check for a disabled data disk."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from ...const import CoreState
|
||||
from ...coresys import CoreSys
|
||||
from ...dbus.udisks2.block import UDisks2Block
|
||||
from ...dbus.udisks2.data import DeviceSpecification
|
||||
from ...os.const import FILESYSTEM_LABEL_DISABLED_DATA_DISK
|
||||
from ..const import ContextType, IssueType, SuggestionType
|
||||
from .base import CheckBase
|
||||
|
||||
|
||||
def setup(coresys: CoreSys) -> CheckBase:
|
||||
"""Check setup function."""
|
||||
return CheckDisabledDataDisk(coresys)
|
||||
|
||||
|
||||
class CheckDisabledDataDisk(CheckBase):
|
||||
"""CheckDisabledDataDisk class for check."""
|
||||
|
||||
async def run_check(self) -> None:
|
||||
"""Run check if not affected by issue."""
|
||||
for block_device in self.sys_dbus.udisks2.block_devices:
|
||||
if self._is_disabled_data_disk(block_device):
|
||||
self.sys_resolution.create_issue(
|
||||
IssueType.DISABLED_DATA_DISK,
|
||||
ContextType.SYSTEM,
|
||||
reference=block_device.device.as_posix(),
|
||||
suggestions=[
|
||||
SuggestionType.RENAME_DATA_DISK,
|
||||
SuggestionType.ADOPT_DATA_DISK,
|
||||
],
|
||||
)
|
||||
|
||||
async def approve_check(self, reference: str | None = None) -> bool:
|
||||
"""Approve check if it is affected by issue."""
|
||||
resolved = await self.sys_dbus.udisks2.resolve_device(
|
||||
DeviceSpecification(path=Path(reference))
|
||||
)
|
||||
return resolved and self._is_disabled_data_disk(resolved[0])
|
||||
|
||||
def _is_disabled_data_disk(self, block_device: UDisks2Block) -> bool:
|
||||
"""Return true if filesystem block device has name indicating it was disabled by OS."""
|
||||
return (
|
||||
block_device.filesystem
|
||||
and block_device.id_label == FILESYSTEM_LABEL_DISABLED_DATA_DISK
|
||||
)
|
||||
|
||||
@property
|
||||
def issue(self) -> IssueType:
|
||||
"""Return a IssueType enum."""
|
||||
return IssueType.DISABLED_DATA_DISK
|
||||
|
||||
@property
|
||||
def context(self) -> ContextType:
|
||||
"""Return a ContextType enum."""
|
||||
return ContextType.SYSTEM
|
||||
|
||||
@property
|
||||
def states(self) -> list[CoreState]:
|
||||
"""Return a list of valid states when this check can run."""
|
||||
return [CoreState.RUNNING, CoreState.SETUP]
|
|
@ -27,7 +27,10 @@ class CheckMultipleDataDisks(CheckBase):
|
|||
IssueType.MULTIPLE_DATA_DISKS,
|
||||
ContextType.SYSTEM,
|
||||
reference=block_device.device.as_posix(),
|
||||
suggestions=[SuggestionType.RENAME_DATA_DISK],
|
||||
suggestions=[
|
||||
SuggestionType.RENAME_DATA_DISK,
|
||||
SuggestionType.ADOPT_DATA_DISK,
|
||||
],
|
||||
)
|
||||
|
||||
async def approve_check(self, reference: str | None = None) -> bool:
|
||||
|
@ -58,4 +61,4 @@ class CheckMultipleDataDisks(CheckBase):
|
|||
@property
|
||||
def states(self) -> list[CoreState]:
|
||||
"""Return a list of valid states when this check can run."""
|
||||
return [CoreState.RUNNING, CoreState.STARTUP]
|
||||
return [CoreState.RUNNING, CoreState.SETUP]
|
||||
|
|
|
@ -53,6 +53,7 @@ class UnsupportedReason(StrEnum):
|
|||
SYSTEMD = "systemd"
|
||||
SYSTEMD_JOURNAL = "systemd_journal"
|
||||
SYSTEMD_RESOLVED = "systemd_resolved"
|
||||
VIRTUALIZATION_IMAGE = "virtualization_image"
|
||||
|
||||
|
||||
class UnhealthyReason(StrEnum):
|
||||
|
@ -72,6 +73,7 @@ class IssueType(StrEnum):
|
|||
CORRUPT_DOCKER = "corrupt_docker"
|
||||
CORRUPT_REPOSITORY = "corrupt_repository"
|
||||
CORRUPT_FILESYSTEM = "corrupt_filesystem"
|
||||
DISABLED_DATA_DISK = "disabled_data_disk"
|
||||
DNS_LOOP = "dns_loop"
|
||||
DNS_SERVER_FAILED = "dns_server_failed"
|
||||
DNS_SERVER_IPV6_ERROR = "dns_server_ipv6_error"
|
||||
|
@ -95,6 +97,7 @@ class IssueType(StrEnum):
|
|||
class SuggestionType(StrEnum):
|
||||
"""Sugestion type."""
|
||||
|
||||
ADOPT_DATA_DISK = "adopt_data_disk"
|
||||
CLEAR_FULL_BACKUP = "clear_full_backup"
|
||||
CREATE_FULL_BACKUP = "create_full_backup"
|
||||
EXECUTE_INTEGRITY = "execute_integrity"
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
"""Evaluation class for virtualization image."""
|
||||
|
||||
from ...const import CoreState
|
||||
from ...coresys import CoreSys
|
||||
from ..const import UnsupportedReason
|
||||
from .base import EvaluateBase
|
||||
|
||||
|
||||
def setup(coresys: CoreSys) -> EvaluateBase:
|
||||
"""Initialize evaluation-setup function."""
|
||||
return EvaluateVirtualizationImage(coresys)
|
||||
|
||||
|
||||
class EvaluateVirtualizationImage(EvaluateBase):
|
||||
"""Evaluate correct OS image used when running under virtualization."""
|
||||
|
||||
@property
|
||||
def reason(self) -> UnsupportedReason:
|
||||
"""Return a UnsupportedReason enum."""
|
||||
return UnsupportedReason.VIRTUALIZATION_IMAGE
|
||||
|
||||
@property
|
||||
def on_failure(self) -> str:
|
||||
"""Return a string that is printed when self.evaluate is True."""
|
||||
return "Image of Home Assistant OS in use does not support virtualization."
|
||||
|
||||
@property
|
||||
def states(self) -> list[CoreState]:
|
||||
"""Return a list of valid states when this evaluation can run."""
|
||||
return [CoreState.SETUP]
|
||||
|
||||
async def evaluate(self):
|
||||
"""Run evaluation."""
|
||||
if not self.sys_os.available:
|
||||
return False
|
||||
return self.sys_host.info.virtualization and self.sys_os.board not in {
|
||||
"ova",
|
||||
"generic-aarch64",
|
||||
}
|
|
@ -58,9 +58,9 @@ class FixupBase(ABC, CoreSysAttributes):
|
|||
"""Return a ContextType enum."""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def issues(self) -> list[IssueType]:
|
||||
"""Return a IssueType enum list."""
|
||||
return []
|
||||
|
||||
@property
|
||||
def auto(self) -> bool:
|
||||
|
|
|
@ -41,7 +41,7 @@ class FixupStoreExecuteReload(FixupBase):
|
|||
# Load data again
|
||||
try:
|
||||
await repository.load()
|
||||
await repository.update()
|
||||
await self.sys_store.reload(repository)
|
||||
except StoreError:
|
||||
raise ResolutionFixupError() from None
|
||||
|
||||
|
|
|
@ -0,0 +1,108 @@
|
|||
"""Adopt data disk fixup."""
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from ...coresys import CoreSys
|
||||
from ...dbus.udisks2.data import DeviceSpecification
|
||||
from ...exceptions import DBusError, HostError, ResolutionFixupError
|
||||
from ...os.const import FILESYSTEM_LABEL_DATA_DISK, FILESYSTEM_LABEL_OLD_DATA_DISK
|
||||
from ..const import ContextType, IssueType, SuggestionType
|
||||
from .base import FixupBase
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def setup(coresys: CoreSys) -> FixupBase:
|
||||
"""Check setup function."""
|
||||
return FixupSystemAdoptDataDisk(coresys)
|
||||
|
||||
|
||||
class FixupSystemAdoptDataDisk(FixupBase):
|
||||
"""Storage class for fixup."""
|
||||
|
||||
async def process_fixup(self, reference: str | None = None) -> None:
|
||||
"""Initialize the fixup class."""
|
||||
if not (
|
||||
new_resolved := await self.sys_dbus.udisks2.resolve_device(
|
||||
DeviceSpecification(path=Path(reference))
|
||||
)
|
||||
):
|
||||
_LOGGER.info(
|
||||
"Data disk at %s with name conflict was removed, skipping adopt",
|
||||
reference,
|
||||
)
|
||||
return
|
||||
|
||||
current = self.sys_dbus.agent.datadisk.current_device
|
||||
if (
|
||||
not current
|
||||
or not (
|
||||
current_resolved := await self.sys_dbus.udisks2.resolve_device(
|
||||
DeviceSpecification(path=current)
|
||||
)
|
||||
)
|
||||
or not current_resolved[0].filesystem
|
||||
):
|
||||
raise ResolutionFixupError(
|
||||
"Cannot resolve current data disk for rename", _LOGGER.error
|
||||
)
|
||||
|
||||
if new_resolved[0].id_label != FILESYSTEM_LABEL_DATA_DISK:
|
||||
_LOGGER.info(
|
||||
"Renaming disabled data disk at %s to %s to activate it",
|
||||
reference,
|
||||
FILESYSTEM_LABEL_DATA_DISK,
|
||||
)
|
||||
try:
|
||||
await new_resolved[0].filesystem.set_label(FILESYSTEM_LABEL_DATA_DISK)
|
||||
except DBusError as err:
|
||||
raise ResolutionFixupError(
|
||||
f"Could not rename filesystem at {reference}: {err!s}",
|
||||
_LOGGER.error,
|
||||
) from err
|
||||
|
||||
_LOGGER.info(
|
||||
"Renaming current data disk at %s to %s so new data disk at %s becomes primary ",
|
||||
self.sys_dbus.agent.datadisk.current_device,
|
||||
FILESYSTEM_LABEL_OLD_DATA_DISK,
|
||||
reference,
|
||||
)
|
||||
try:
|
||||
await current_resolved[0].filesystem.set_label(
|
||||
FILESYSTEM_LABEL_OLD_DATA_DISK
|
||||
)
|
||||
except DBusError as err:
|
||||
raise ResolutionFixupError(
|
||||
f"Could not rename filesystem at {current.as_posix()}: {err!s}",
|
||||
_LOGGER.error,
|
||||
) from err
|
||||
|
||||
_LOGGER.info("Rebooting the host to finish adoption")
|
||||
try:
|
||||
await self.sys_host.control.reboot()
|
||||
except (HostError, DBusError) as err:
|
||||
_LOGGER.warning(
|
||||
"Could not reboot host to finish data disk adoption, manual reboot required to finish process: %s",
|
||||
err,
|
||||
)
|
||||
self.sys_resolution.create_issue(
|
||||
IssueType.REBOOT_REQUIRED,
|
||||
ContextType.SYSTEM,
|
||||
suggestions=[SuggestionType.EXECUTE_REBOOT],
|
||||
)
|
||||
|
||||
@property
|
||||
def suggestion(self) -> SuggestionType:
|
||||
"""Return a SuggestionType enum."""
|
||||
return SuggestionType.ADOPT_DATA_DISK
|
||||
|
||||
@property
|
||||
def context(self) -> ContextType:
|
||||
"""Return a ContextType enum."""
|
||||
return ContextType.SYSTEM
|
||||
|
||||
@property
|
||||
def issues(self) -> list[IssueType]:
|
||||
"""Return a IssueType enum list."""
|
||||
return [IssueType.DISABLED_DATA_DISK, IssueType.MULTIPLE_DATA_DISKS]
|
|
@ -2,7 +2,7 @@
|
|||
import logging
|
||||
|
||||
from ...coresys import CoreSys
|
||||
from ..const import ContextType, SuggestionType
|
||||
from ..const import ContextType, IssueType, SuggestionType
|
||||
from .base import FixupBase
|
||||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
@ -21,6 +21,11 @@ class FixupSystemCreateFullBackup(FixupBase):
|
|||
_LOGGER.info("Creating a full backup")
|
||||
await self.sys_backups.do_backup_full()
|
||||
|
||||
@property
|
||||
def issues(self) -> list[IssueType]:
|
||||
"""Return a IssueType enum list."""
|
||||
return [IssueType.NO_CURRENT_BACKUP]
|
||||
|
||||
@property
|
||||
def suggestion(self) -> SuggestionType:
|
||||
"""Return a SuggestionType enum."""
|
||||
|
|
|
@ -66,4 +66,4 @@ class FixupSystemRenameDataDisk(FixupBase):
|
|||
@property
|
||||
def issues(self) -> list[IssueType]:
|
||||
"""Return a IssueType enum list."""
|
||||
return [IssueType.MULTIPLE_DATA_DISKS]
|
||||
return [IssueType.DISABLED_DATA_DISK, IssueType.MULTIPLE_DATA_DISKS]
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
"""Add-on Store handler."""
|
||||
import asyncio
|
||||
from collections.abc import Awaitable
|
||||
import logging
|
||||
|
||||
from ..const import ATTR_REPOSITORIES, URL_HASSIO_ADDONS
|
||||
|
@ -85,15 +86,39 @@ class StoreManager(CoreSysAttributes, FileConfiguration):
|
|||
conditions=[JobCondition.SUPERVISOR_UPDATED],
|
||||
on_condition=StoreJobError,
|
||||
)
|
||||
async def reload(self) -> None:
|
||||
async def reload(self, repository: Repository | None = None) -> None:
|
||||
"""Update add-ons from repository and reload list."""
|
||||
tasks = [self.sys_create_task(repository.update()) for repository in self.all]
|
||||
if tasks:
|
||||
await asyncio.wait(tasks)
|
||||
# Make a copy to prevent race with other tasks
|
||||
repositories = [repository] if repository else self.all.copy()
|
||||
results: list[bool | Exception] = await asyncio.gather(
|
||||
*[repo.update() for repo in repositories], return_exceptions=True
|
||||
)
|
||||
|
||||
# read data from repositories
|
||||
await self.load()
|
||||
self._read_addons()
|
||||
# Determine which repositories were updated
|
||||
updated_repos: set[str] = set()
|
||||
for i, result in enumerate(results):
|
||||
if result is True:
|
||||
updated_repos.add(repositories[i].slug)
|
||||
elif result:
|
||||
_LOGGER.error(
|
||||
"Could not reload repository %s due to %r",
|
||||
repositories[i].slug,
|
||||
result,
|
||||
)
|
||||
|
||||
# Update path cache for all addons in updated repos
|
||||
if updated_repos:
|
||||
await asyncio.gather(
|
||||
*[
|
||||
addon.refresh_path_cache()
|
||||
for addon in self.sys_addons.store.values()
|
||||
if addon.repository in updated_repos
|
||||
]
|
||||
)
|
||||
|
||||
# read data from repositories
|
||||
await self.load()
|
||||
await self._read_addons()
|
||||
|
||||
@Job(
|
||||
name="store_manager_add_repository",
|
||||
|
@ -185,7 +210,7 @@ class StoreManager(CoreSysAttributes, FileConfiguration):
|
|||
# Persist changes
|
||||
if persist:
|
||||
await self.data.update()
|
||||
self._read_addons()
|
||||
await self._read_addons()
|
||||
|
||||
async def remove_repository(self, repository: Repository, *, persist: bool = True):
|
||||
"""Remove a repository."""
|
||||
|
@ -205,7 +230,7 @@ class StoreManager(CoreSysAttributes, FileConfiguration):
|
|||
|
||||
if persist:
|
||||
await self.data.update()
|
||||
self._read_addons()
|
||||
await self._read_addons()
|
||||
|
||||
@Job(name="store_manager_update_repositories")
|
||||
async def update_repositories(
|
||||
|
@ -245,14 +270,14 @@ class StoreManager(CoreSysAttributes, FileConfiguration):
|
|||
|
||||
# Always update data, even there are errors, some changes may have succeeded
|
||||
await self.data.update()
|
||||
self._read_addons()
|
||||
await self._read_addons()
|
||||
|
||||
# Raise the first error we found (if any)
|
||||
for error in add_errors + remove_errors:
|
||||
if error:
|
||||
raise error
|
||||
|
||||
def _read_addons(self) -> None:
|
||||
async def _read_addons(self) -> None:
|
||||
"""Reload add-ons inside store."""
|
||||
all_addons = set(self.data.addons)
|
||||
|
||||
|
@ -268,8 +293,13 @@ class StoreManager(CoreSysAttributes, FileConfiguration):
|
|||
)
|
||||
|
||||
# new addons
|
||||
for slug in add_addons:
|
||||
self.sys_addons.store[slug] = AddonStore(self.coresys, slug)
|
||||
if add_addons:
|
||||
cache_updates: list[Awaitable[None]] = []
|
||||
for slug in add_addons:
|
||||
self.sys_addons.store[slug] = AddonStore(self.coresys, slug)
|
||||
cache_updates.append(self.sys_addons.store[slug].refresh_path_cache())
|
||||
|
||||
await asyncio.gather(*cache_updates)
|
||||
|
||||
# remove
|
||||
for slug in del_addons:
|
||||
|
|
|
@ -117,7 +117,7 @@ class GitRepo(CoreSysAttributes):
|
|||
conditions=[JobCondition.FREE_SPACE, JobCondition.INTERNET_SYSTEM],
|
||||
on_condition=StoreJobError,
|
||||
)
|
||||
async def pull(self):
|
||||
async def pull(self) -> bool:
|
||||
"""Pull Git add-on repo."""
|
||||
if self.lock.locked():
|
||||
_LOGGER.warning("There is already a task in progress")
|
||||
|
@ -140,10 +140,13 @@ class GitRepo(CoreSysAttributes):
|
|||
)
|
||||
)
|
||||
|
||||
# Jump on top of that
|
||||
await self.sys_run_in_executor(
|
||||
ft.partial(self.repo.git.reset, f"origin/{branch}", hard=True)
|
||||
)
|
||||
if changed := self.repo.commit(branch) != self.repo.commit(
|
||||
f"origin/{branch}"
|
||||
):
|
||||
# Jump on top of that
|
||||
await self.sys_run_in_executor(
|
||||
ft.partial(self.repo.git.reset, f"origin/{branch}", hard=True)
|
||||
)
|
||||
|
||||
# Update submodules
|
||||
await self.sys_run_in_executor(
|
||||
|
@ -160,6 +163,8 @@ class GitRepo(CoreSysAttributes):
|
|||
# Cleanup old data
|
||||
await self.sys_run_in_executor(ft.partial(self.repo.git.clean, "-xdf"))
|
||||
|
||||
return changed
|
||||
|
||||
except (
|
||||
git.InvalidGitRepositoryError,
|
||||
git.NoSuchPathError,
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
"""Represent a Supervisor repository."""
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
|
@ -101,11 +102,11 @@ class Repository(CoreSysAttributes):
|
|||
return
|
||||
await self.git.load()
|
||||
|
||||
async def update(self) -> None:
|
||||
async def update(self) -> bool:
|
||||
"""Update add-on repository."""
|
||||
if self.type == StoreType.LOCAL or not self.validate():
|
||||
return
|
||||
await self.git.pull()
|
||||
if not self.validate():
|
||||
return False
|
||||
return self.type == StoreType.LOCAL or await self.git.pull()
|
||||
|
||||
async def remove(self) -> None:
|
||||
"""Remove add-on repository."""
|
||||
|
|
|
@ -37,6 +37,7 @@ from .sentry import capture_exception
|
|||
|
||||
_LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
|
||||
DBUS_INTERFACE_OBJECT_MANAGER: str = "org.freedesktop.DBus.ObjectManager"
|
||||
DBUS_INTERFACE_PROPERTIES: str = "org.freedesktop.DBus.Properties"
|
||||
DBUS_METHOD_GETALL: str = "org.freedesktop.DBus.Properties.GetAll"
|
||||
|
||||
|
@ -196,6 +197,13 @@ class DBus:
|
|||
return None
|
||||
return DBusCallWrapper(self, DBUS_INTERFACE_PROPERTIES)
|
||||
|
||||
@property
|
||||
def object_manager(self) -> DBusCallWrapper | None:
|
||||
"""Get object manager proxy interface."""
|
||||
if DBUS_INTERFACE_OBJECT_MANAGER not in self._proxies:
|
||||
return None
|
||||
return DBusCallWrapper(self, DBUS_INTERFACE_OBJECT_MANAGER)
|
||||
|
||||
async def get_properties(self, interface: str) -> dict[str, Any]:
|
||||
"""Read all properties from interface."""
|
||||
if not self.properties:
|
||||
|
|
|
@ -0,0 +1,115 @@
|
|||
"""Utilities for working with systemd journal export format."""
|
||||
from collections.abc import AsyncGenerator
|
||||
from datetime import UTC, datetime
|
||||
from functools import wraps
|
||||
|
||||
from aiohttp import ClientResponse
|
||||
|
||||
from supervisor.exceptions import MalformedBinaryEntryError
|
||||
from supervisor.host.const import LogFormatter
|
||||
|
||||
|
||||
def formatter(required_fields: list[str]):
|
||||
"""Decorate journal entry formatters with list of required fields.
|
||||
|
||||
Helper decorator that can be used for getting list of required fields for a journal
|
||||
formatter function using function.required_fields function attribute.
|
||||
"""
|
||||
|
||||
def decorator(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
return func(*args, **kwargs)
|
||||
|
||||
wrapper.required_fields = required_fields
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
@formatter(["MESSAGE"])
|
||||
def journal_plain_formatter(entries: dict[str, str]) -> str:
|
||||
"""Format parsed journal entries as a plain message."""
|
||||
return entries["MESSAGE"]
|
||||
|
||||
|
||||
@formatter(
|
||||
[
|
||||
"__REALTIME_TIMESTAMP",
|
||||
"_HOSTNAME",
|
||||
"SYSLOG_IDENTIFIER",
|
||||
"_PID",
|
||||
"MESSAGE",
|
||||
]
|
||||
)
|
||||
def journal_verbose_formatter(entries: dict[str, str]) -> str:
|
||||
"""Format parsed journal entries to a journalctl-like format."""
|
||||
ts = datetime.fromtimestamp(
|
||||
int(entries["__REALTIME_TIMESTAMP"]) / 1e6, UTC
|
||||
).isoformat(sep=" ", timespec="milliseconds")
|
||||
ts = ts[: ts.index(".") + 4] # strip TZ offset
|
||||
|
||||
identifier = (
|
||||
f"{entries.get("SYSLOG_IDENTIFIER", "_UNKNOWN_")}[{entries["_PID"]}]"
|
||||
if "_PID" in entries
|
||||
else entries.get("SYSLOG_IDENTIFIER", "_UNKNOWN_")
|
||||
)
|
||||
|
||||
return f"{ts} {entries.get("_HOSTNAME", "")} {identifier}: {entries.get("MESSAGE", "")}"
|
||||
|
||||
|
||||
async def journal_logs_reader(
|
||||
journal_logs: ClientResponse,
|
||||
log_formatter: LogFormatter = LogFormatter.PLAIN,
|
||||
) -> AsyncGenerator[str, None]:
|
||||
"""Read logs from systemd journal line by line, formatted using the given formatter."""
|
||||
match log_formatter:
|
||||
case LogFormatter.PLAIN:
|
||||
formatter_ = journal_plain_formatter
|
||||
case LogFormatter.VERBOSE:
|
||||
formatter_ = journal_verbose_formatter
|
||||
case _:
|
||||
raise ValueError(f"Unknown log format: {log_formatter}")
|
||||
|
||||
async with journal_logs as resp:
|
||||
entries: dict[str, str] = {}
|
||||
while not resp.content.at_eof():
|
||||
line = await resp.content.readuntil(b"\n")
|
||||
# newline means end of message, also empty line is sometimes returned
|
||||
# at EOF (likely race between at_eof and EOF check in readuntil)
|
||||
if line == b"\n" or not line:
|
||||
if entries:
|
||||
yield formatter_(entries)
|
||||
entries = {}
|
||||
continue
|
||||
|
||||
# Journal fields consisting only of valid non-control UTF-8 codepoints
|
||||
# are serialized as they are (i.e. the field name, followed by '=',
|
||||
# followed by field data), followed by a newline as separator to the next
|
||||
# field. Note that fields containing newlines cannot be formatted like
|
||||
# this. Non-control UTF-8 codepoints are the codepoints with value at or
|
||||
# above 32 (' '), or equal to 9 (TAB).
|
||||
name, sep, data = line.partition(b"=")
|
||||
if not sep:
|
||||
# Other journal fields are serialized in a special binary safe way:
|
||||
# field name, followed by newline
|
||||
name = name[:-1] # strip \n
|
||||
# followed by a binary 64-bit little endian size value,
|
||||
length_raw = await resp.content.readexactly(8)
|
||||
length = int.from_bytes(length_raw, byteorder="little")
|
||||
# followed by the binary field data,
|
||||
data = await resp.content.readexactly(length + 1)
|
||||
# followed by a newline as separator to the next field.
|
||||
if not data.endswith(b"\n"):
|
||||
raise MalformedBinaryEntryError(
|
||||
f"Failed parsing binary entry {data}"
|
||||
)
|
||||
|
||||
name = name.decode("utf-8")
|
||||
if name not in formatter_.required_fields:
|
||||
# we must read to the end of the entry in the stream, so we can
|
||||
# only continue the loop here
|
||||
continue
|
||||
|
||||
# strip \n for simple fields before decoding
|
||||
entries[name] = data[:-1].decode("utf-8")
|
|
@ -7,7 +7,7 @@ from pathlib import Path
|
|||
from unittest.mock import MagicMock, PropertyMock, patch
|
||||
|
||||
from awesomeversion import AwesomeVersion
|
||||
from docker.errors import DockerException, NotFound
|
||||
from docker.errors import DockerException, ImageNotFound, NotFound
|
||||
import pytest
|
||||
from securetar import SecureTarFile
|
||||
|
||||
|
@ -748,3 +748,102 @@ def test_auto_update_available(coresys: CoreSys, install_addon_example: Addon):
|
|||
Addon, "version", new=PropertyMock(return_value=AwesomeVersion("test"))
|
||||
):
|
||||
assert install_addon_example.auto_update_available is False
|
||||
|
||||
|
||||
async def test_paths_cache(coresys: CoreSys, install_addon_ssh: Addon):
|
||||
"""Test cache for key paths that may or may not exist."""
|
||||
with patch("supervisor.addons.addon.Path.exists", return_value=True):
|
||||
assert not install_addon_ssh.with_logo
|
||||
assert not install_addon_ssh.with_icon
|
||||
assert not install_addon_ssh.with_changelog
|
||||
assert not install_addon_ssh.with_documentation
|
||||
|
||||
await coresys.store.reload(coresys.store.get("local"))
|
||||
assert install_addon_ssh.with_logo
|
||||
assert install_addon_ssh.with_icon
|
||||
assert install_addon_ssh.with_changelog
|
||||
assert install_addon_ssh.with_documentation
|
||||
|
||||
|
||||
async def test_addon_loads_wrong_image(
|
||||
coresys: CoreSys,
|
||||
install_addon_ssh: Addon,
|
||||
container: MagicMock,
|
||||
mock_amd64_arch_supported,
|
||||
):
|
||||
"""Test addon is loaded with incorrect image for architecture."""
|
||||
coresys.addons.data.save_data.reset_mock()
|
||||
install_addon_ssh.persist["image"] = "local/aarch64-addon-ssh"
|
||||
assert install_addon_ssh.image == "local/aarch64-addon-ssh"
|
||||
|
||||
with patch("pathlib.Path.is_file", return_value=True):
|
||||
await install_addon_ssh.load()
|
||||
|
||||
container.remove.assert_called_once_with(force=True)
|
||||
assert coresys.docker.images.remove.call_args_list[0].kwargs == {
|
||||
"image": "local/aarch64-addon-ssh:latest",
|
||||
"force": True,
|
||||
}
|
||||
assert coresys.docker.images.remove.call_args_list[1].kwargs == {
|
||||
"image": "local/aarch64-addon-ssh:9.2.1",
|
||||
"force": True,
|
||||
}
|
||||
coresys.docker.images.build.assert_called_once()
|
||||
assert (
|
||||
coresys.docker.images.build.call_args.kwargs["tag"]
|
||||
== "local/amd64-addon-ssh:9.2.1"
|
||||
)
|
||||
assert coresys.docker.images.build.call_args.kwargs["platform"] == "linux/amd64"
|
||||
assert install_addon_ssh.image == "local/amd64-addon-ssh"
|
||||
coresys.addons.data.save_data.assert_called_once()
|
||||
|
||||
|
||||
async def test_addon_loads_missing_image(
    coresys: CoreSys,
    install_addon_ssh: Addon,
    container: MagicMock,
    mock_amd64_arch_supported,
):
    """Test addon corrects a missing image on load."""
    # Simulate the image having vanished from the Docker daemon.
    coresys.docker.images.get.side_effect = ImageNotFound("missing")

    with patch("pathlib.Path.is_file", return_value=True):
        await install_addon_ssh.load()

    # Load must rebuild the image for the current architecture, once.
    coresys.docker.images.build.assert_called_once()
    build_kwargs = coresys.docker.images.build.call_args.kwargs
    assert build_kwargs["tag"] == "local/amd64-addon-ssh:9.2.1"
    assert build_kwargs["platform"] == "linux/amd64"
    assert install_addon_ssh.image == "local/amd64-addon-ssh"
|
||||
|
||||
|
||||
async def test_addon_load_succeeds_with_docker_errors(
    coresys: CoreSys,
    install_addon_ssh: Addon,
    container: MagicMock,
    caplog: pytest.LogCaptureFixture,
    mock_amd64_arch_supported,
):
    """Docker errors while building/pulling an image during load should not raise and fail setup."""

    async def load_and_expect_log(message: str) -> None:
        """Run load() and assert it completed while logging *message*."""
        caplog.clear()
        await install_addon_ssh.load()
        assert message in caplog.text

    # Build env invalid failure: image missing and no build context on disk.
    coresys.docker.images.get.side_effect = ImageNotFound("missing")
    await load_and_expect_log("Invalid build environment")

    # Image build failure: build context exists, but the build itself errors.
    coresys.docker.images.build.side_effect = DockerException()
    with patch("pathlib.Path.is_file", return_value=True):
        await load_and_expect_log("Can't build local/amd64-addon-ssh:9.2.1")

    # Image pull failure: non-local image, pull from the registry errors.
    install_addon_ssh.data["image"] = "test/amd64-addon-ssh"
    coresys.docker.images.build.reset_mock(side_effect=True)
    coresys.docker.images.pull.side_effect = DockerException()
    await load_and_expect_log("Unknown error with test/amd64-addon-ssh:9.2.1")
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
"""Validate Add-on configs."""
|
||||
|
||||
import logging
|
||||
from unittest.mock import Mock
|
||||
|
||||
import pytest
|
||||
import voluptuous as vol
|
||||
|
@ -288,14 +286,3 @@ def test_valid_slug():
|
|||
config["slug"] = "complemento telefónico"
|
||||
with pytest.raises(vol.Invalid):
|
||||
assert vd.SCHEMA_ADDON_CONFIG(config)
|
||||
|
||||
|
||||
def test_invalid_discovery(capture_event: Mock, caplog: pytest.LogCaptureFixture):
    """Test that unknown discovery services validate but emit a warning."""
    config = load_json_fixture("basic-addon-config.json")
    config["discovery"] = ["mqtt", "junk", "junk2"]

    # Unknown services must not fail validation, only warn. at_level() has to
    # wrap the validation call itself: the warning is emitted while the schema
    # runs, so wrapping only the assertion (as the code did before) made the
    # level adjustment a no-op.
    with caplog.at_level(logging.WARNING):
        assert vd.SCHEMA_ADDON_CONFIG(config)

    assert "unknown services for discovery: junk, junk2" in caplog.text
|
||||
|
|
|
@ -86,7 +86,7 @@ async def test_image_added_removed_on_update(
|
|||
DockerAddon, "_build"
|
||||
) as build:
|
||||
await coresys.addons.update(TEST_ADDON_SLUG)
|
||||
build.assert_called_once_with(AwesomeVersion("11.0.0"))
|
||||
build.assert_called_once_with(AwesomeVersion("11.0.0"), "local/amd64-addon-ssh")
|
||||
install.assert_not_called()
|
||||
|
||||
|
||||
|
@ -393,7 +393,7 @@ async def test_store_data_changes_during_update(
|
|||
update_task = coresys.create_task(simulate_update())
|
||||
await asyncio.sleep(0)
|
||||
|
||||
with patch.object(Repository, "update"):
|
||||
with patch.object(Repository, "update", return_value=True):
|
||||
await coresys.store.reload()
|
||||
|
||||
assert "image" not in coresys.store.data.addons["local_ssh"]
|
||||
|
|
|
@ -1 +1,66 @@
|
|||
"""Test for API calls."""
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from aiohttp.test_utils import TestClient
|
||||
|
||||
from supervisor.host.const import LogFormat
|
||||
|
||||
DEFAULT_LOG_RANGE = "entries=:-100:"
|
||||
|
||||
|
||||
async def common_test_api_advanced_logs(
    path_prefix: str,
    syslog_identifier: str,
    api_client: TestClient,
    journald_logs: MagicMock,
):
    """Template for tests of endpoints using advanced logs."""

    async def get_and_check(endpoint: str, params: dict[str, str]) -> None:
        """GET *endpoint* and verify the journald query it triggered."""
        response = await api_client.get(f"{path_prefix}{endpoint}")
        assert response.status == 200
        assert response.content_type == "text/plain"
        journald_logs.assert_called_once_with(
            params=params,
            range_header=DEFAULT_LOG_RANGE,
            accept=LogFormat.JOURNAL,
        )

    ident = {"SYSLOG_IDENTIFIER": syslog_identifier}
    # NOTE(review): "_BOOT_ID": "ccc" matches what boot offset 0 resolves to —
    # presumably via the journald fixture; confirm against the fixture setup.
    checks = (
        ("/logs", ident),
        ("/logs/follow", {**ident, "follow": ""}),
        ("/logs/boots/0", {**ident, "_BOOT_ID": "ccc"}),
        ("/logs/boots/0/follow", {**ident, "_BOOT_ID": "ccc", "follow": ""}),
    )
    for index, (endpoint, params) in enumerate(checks):
        # Reset between checks (not after the last) so the mock state matches
        # what each assert_called_once_with expects.
        if index:
            journald_logs.reset_mock()
        await get_and_check(endpoint, params)
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue