Compare commits


17 Commits

SHA1 Message Date
f59d3fbceb fix 2025-12-06 09:41:54 +01:00
6d8ec79516 fix 2025-12-06 09:36:27 +01:00
18294bfcac fix 2025-12-06 09:28:08 +01:00
dc0d51bcc4 fix 2025-12-03 16:53:09 +01:00
ca0a76ccb8 fix 2025-12-03 16:46:36 +01:00
7b102eca12 fix 2025-12-03 16:29:29 +01:00
29b2adfb55 Add alloy 2025-12-03 15:29:37 +01:00
0912f7866e fix 2025-11-03 22:25:47 +01:00
2b2006a02c fix 2025-11-03 22:24:24 +01:00
5f4c9e26bc fix 2025-11-03 22:22:58 +01:00
1a8c643079 fix 2025-11-03 22:20:23 +01:00
66d7b9884e fix 2025-11-03 22:18:38 +01:00
90e540a363 fix 2025-11-03 22:14:47 +01:00
86b46aa29f Update versions 2025-11-03 22:12:01 +01:00
1576ac987b fix 2025-03-17 00:20:38 +01:00
0befdc45b7 fix 2025-03-17 00:13:25 +01:00
02cd25b907 Update 2024-11-19 23:40:22 +01:00
42 changed files with 445 additions and 1042 deletions

BIN .DS_Store vendored (binary file not shown)

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.DS_Store

View File

@@ -10,6 +10,7 @@ ARG BUILD_REF
ARG BUILD_REPOSITORY
ARG BUILD_VERSION
ARG TELEGRAF_VERSION
ARG HAPROXY_BRANCH
ARG HAPROXY_VERSION
ARG HAPROXY_SHA256
@@ -50,9 +51,9 @@ RUN set -eux; \
mkdir /var/lib/haproxy; \
chown haproxy:haproxy /var/lib/haproxy
ENV HAPROXY_VERSION ${HAPROXY_VERSION}
ENV HAPROXY_URL https://www.haproxy.org/download/2.8/src/haproxy-${HAPROXY_VERSION}.tar.gz
ENV HAPROXY_SHA256 ${HAPROXY_SHA256}
ENV HAPROXY_VERSION=${HAPROXY_VERSION}
ENV HAPROXY_URL=https://www.haproxy.org/download/${HAPROXY_BRANCH}/src/haproxy-${HAPROXY_VERSION}.tar.gz
ENV HAPROXY_SHA256=${HAPROXY_SHA256}
RUN set -eux; \
\

View File

@@ -1,9 +1,10 @@
build_from:
  aarch64: ghcr.io/hassio-addons/base:16.1.2
  amd64: ghcr.io/hassio-addons/base:16.1.2
  armhf: ghcr.io/hassio-addons/base:16.1.2
  armv7: ghcr.io/hassio-addons/base:16.1.2
  i386: ghcr.io/hassio-addons/base:16.1.2
  aarch64: ghcr.io/hassio-addons/base:19.0.0
  amd64: ghcr.io/hassio-addons/base:19.0.0
  armhf: ghcr.io/hassio-addons/base:19.0.0
  armv7: ghcr.io/hassio-addons/base:19.0.0
  i386: ghcr.io/hassio-addons/base:19.0.0
args:
  HAPROXY_VERSION: 2.8.10
  HAPROXY_SHA256: 0d63cd46d9d10ac7dbc02f3c6769c1908f221e0a5c5b655a194655f7528d612a
  HAPROXY_BRANCH: "3.2"
  HAPROXY_VERSION: 3.2.9
  HAPROXY_SHA256: e660d141b29019f4d198785b0834cc3e9c96efceeb807c2fff2fc935bd3354c2

View File

@@ -1,6 +1,6 @@
---
name: Docker-Socket-Proxy
version: 2.8.10_1
version: 3.2.9_1
slug: hassio_docker_socket_proxy
description: An addon to enable TCP docker access.
url: https://gitea.bonelle-family.dscloud.biz/francois.bonelle/hassio-repo.git

34
grafana_alloy/Dockerfile Normal file
View File

@@ -0,0 +1,34 @@
ARG BUILD_FROM
FROM $BUILD_FROM
ARG \
BUILD_ARCH \
BUILD_VERSION \
GRAFANA_ALLOY_VERSION
LABEL \
io.hass.version=${BUILD_VERSION} \
io.hass.type="addon" \
io.hass.arch="${BUILD_ARCH}"
RUN apt-get update && \
apt-get install -y --no-install-recommends \
unzip \
gettext-base \
curl && \
rm -rf /var/lib/apt/lists/* && \
apt clean && \
ARCH="${BUILD_ARCH}" && \
if [ "${BUILD_ARCH}" = "aarch64" ]; then ARCH="arm64"; fi && \
curl -J -L -o /tmp/alloy.zip "https://github.com/grafana/alloy/releases/download/v${GRAFANA_ALLOY_VERSION}/alloy-linux-${ARCH}.zip" && \
cd /tmp && \
unzip alloy.zip && \
mv alloy-linux-${ARCH} /usr/local/bin/alloy && \
chmod +x /usr/local/bin/alloy && \
rm -rf /tmp/alloy*
COPY rootfs /
RUN chmod +x /run.sh /etc/cont-init.d/alloy_setup.sh /etc/services.d/alloy/run
ENTRYPOINT []
CMD ["/run.sh"]

84
grafana_alloy/README.md Normal file
View File

@@ -0,0 +1,84 @@
# Grafana Alloy
[Grafana Alloy](https://grafana.com/docs/alloy) combines the strengths of the leading collectors into one place. Whether observing applications, infrastructure, or both, Grafana Alloy can collect, process, and export telemetry signals to scale and future-proof your observability approach.
Currently, this add-on supports the following components:
- [prometheus.scrape](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.scrape/) - Scrapes metrics and forwards them to the Prometheus remote-write endpoint.
- [prometheus.exporter.unix](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.exporter.unix/) - Uses the [node_exporter](https://github.com/prometheus/node_exporter) to expose Home Assistant hardware and OS metrics for \*nix-based systems.
- [prometheus.exporter.process](https://grafana.com/docs/alloy/latest/reference/components/prometheus/prometheus.exporter.process/) - Enables [process_exporter](https://github.com/ncabatoff/process-exporter) to collect Home Assistant process stats from /proc.
- [loki.write](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.write/) - Sends logs to a Loki instance.
- [loki.source.journal](https://grafana.com/docs/alloy/latest/reference/components/loki/loki.source.journal/) - Collects Home Assistant Journal logs to send to Loki.
## Installation
1. Add [repository](https://github.com/wymangr/hassos-addons) to Home Assistant.
1. Search for "Grafana Alloy" in the Home Assistant add-on store and install it.
1. Disable "Protection mode" in the add-on panel. (Optional, [see below for more details](#protection-mode))
1. Update configuration on the add-on "Configuration" Tab. See options below.
1. Start the add-on.
1. Check the `Logs` to confirm the add-on started successfully.
1. You can also open the Grafana Alloy Web UI at `http://<homeassistant_ip>:12345` in your browser.
## Protection Mode
Disabling protection mode is optional; however, there are a few things that I found don't work without disabling it. Most of the limitations are around host processes. Per the Home Assistant docs: _"Allow the container to run on the host PID namespace. Works only for not protected add-ons."_
Note: These are just the limitations I found; there may be other incorrect or missing metrics.
**Only disable the protection mode if you know, need AND trust the source of this add-on.** Always review the code of an add-on before disabling protection mode.
### Limitations:
**prometheus.exporter.process**
- If Protection mode is enabled, the only process that will be collected is the one for Alloy. There will be no metrics for host processes.
**prometheus.exporter.unix**
- Process related metrics won't display any host process information with protection mode enabled.
- Disk metrics will only show mount data for the Alloy add-on, no host mount data will be collected with protection mode enabled.
**loki.source.journal**
No limitations that I found.
## Configuration
| Config | Description | Default value | Required |
| ---------------------------- | ---------------------------------------------------------------------------------------- | ----------------------------------- | --------------------------- |
| `enable_prometheus` | Enable sending metrics to Prometheus. If enabled, prometheus_write_endpoint is required. | true | No |
| `prometheus_write_endpoint` | Full URL to send metrics to. | http://prometheus:9090/api/v1/write | If `enable_prometheus`=true |
| `enable_unix_component` | Enables prometheus.exporter.unix component to collect node_exporter metrics. | true | No |
| `enable_process_component` | Enables prometheus.exporter.process component to collect process_exporter metrics. | true | No |
| `prometheus_scrape_interval` | How frequently to scrape the targets of this scrape configuration. | 15s | No |
| `servername_tag` | servername tag value. | HomeAssistant | No |
| `instance_tag` | Overwrite the default metric "instance" tag. | | No |
| `enable_loki` | Enable sending logs to Loki. If enabled, loki_endpoint is required. | false | No |
| `loki_endpoint` | Full Loki URL to send logs to. | http://loki:3100/api/v1/push | If `enable_loki`=true |
| `enable_loki_syslog` | Listen for syslog messages over UDP or TCP connections and forward them to Loki. | false | No |
| `override_config` | If enabled, all other options will be ignored and you can supply your own Alloy config. | false | No |
| `override_config_path` | Path to the override Alloy config file. The HA config directory is mounted at /config. | /config/alloy/example.alloy | If `override_config`=true |
If `override_config` is true and a valid Alloy config file is supplied in `override_config_path`, all other options will be ignored.
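The override file must be a complete Alloy configuration. As a minimal, hypothetical sketch (the component names below mirror the config this add-on generates from its own options, and the endpoint URL is just the default from the table above), `/config/alloy/example.alloy` could look like:

```
// Minimal override config sketch: collect node_exporter metrics and
// remote-write them to Prometheus. Adjust the endpoint URL to your setup.
prometheus.exporter.unix "node_exporter" { }

prometheus.scrape "unix" {
  targets         = prometheus.exporter.unix.node_exporter.targets
  forward_to      = [prometheus.remote_write.default.receiver]
  scrape_interval = "15s"
}

prometheus.remote_write "default" {
  endpoint {
    url = "http://prometheus:9090/api/v1/write"
  }
}
```

A practical way to build your own: start the add-on once with `override_config` disabled, copy the generated config that the add-on prints to its log on startup, and edit from there.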
## Support
- Tested on `aarch64` and `amd64`.
## Todo
- [x] Add more customization options (Enable/disable components, scrape_interval, etc..)
- [ ] Add Github workflows
- [ ] Build and publish a docker image so users don't have to build the image on every install
- [x] Verify all permissions added to `config.yaml` are required and remove unneeded ones
## Example Data
https://grafana.com/grafana/dashboards/1860-node-exporter-full/
![prometheus.exporter.unix Example](images/prometheus.exporter.unix.png)
https://grafana.com/grafana/dashboards/8378-system-processes-metrics/
![prometheus.exporter.process Example](images/prometheus.exporter.process.png)
![Loki Log Example](images/loki.png)

View File

@@ -1,6 +1,6 @@
#include <tunables/global>
profile hassio_telegraf flags=(attach_disconnected,mediate_deleted) {
profile grafana_alloy flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
# Capabilities
@@ -28,13 +28,14 @@ profile hassio_telegraf flags=(attach_disconnected,mediate_deleted) {
/data/** rw,
# Start new profile for service
/usr/bin/myprogram cx -> myprogram,
/usr/local/bin/alloy cx -> alloy,
profile myprogram flags=(attach_disconnected,mediate_deleted) {
profile alloy flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
ptrace (trace,read),
# Receive signals from S6-Overlay
signal (receive) peer=*_ADDON_SLUG,
signal (receive) peer=*_grafana_alloy,
# Access to options.json and other files within your addon
/data/** rw,
@@ -42,11 +43,27 @@ profile hassio_telegraf flags=(attach_disconnected,mediate_deleted) {
# Access to mapped volumes specified in config.json
/share/** rw,
# Temp files (Loki)
/tmp/.positions.* rw,
# certificates
/etc/ssl/certs/{,**} r,
/usr/share/ca-certificates/{,**} r,
# Access required for service functionality
/usr/bin/myprogram r,
/usr/local/bin/alloy rm,
/config/** rw,
/etc/alloy/config.alloy r,
/var/log/journal/{,**} r,
/etc/nsswitch.conf r,
/proc/{,**} r,
/sys/** r,
/etc/hosts r,
/etc/resolv.conf r,
/bin/bash rix,
/bin/echo ix,
/etc/passwd r,
/dev/tty rw,
/var/run/docker.sock r,
}
}

6
grafana_alloy/build.yaml Normal file
View File

@@ -0,0 +1,6 @@
---
build_from:
  aarch64: ghcr.io/hassio-addons/debian-base:9.1.0
  amd64: ghcr.io/hassio-addons/debian-base:9.1.0
args:
  GRAFANA_ALLOY_VERSION: 1.12.0

54
grafana_alloy/config.yaml Normal file
View File

@@ -0,0 +1,54 @@
---
name: "Grafana Alloy"
description: "Grafana Alloy"
version: "1.12.0"
slug: "grafana_alloy"
arch:
  - aarch64
  - amd64
  - armv7
  - armhf
ports:
  12345/tcp:
  5514/udp:
  5601/tcp:
ports_description:
  12345/tcp: Alloy web server
  5514/udp: Alloy UDP syslog
  5601/tcp: Alloy TCP syslog
journald: true
host_network: true
hassio_api: true
homeassistant_api: true
auth_api: true
docker_api: true
host_pid: true
apparmor: false
map:
  - type: homeassistant_config
    path: /config
options:
  enable_prometheus: true
  prometheus_write_endpoint: http://prometheus:9090/api/v1/write
  enable_unix_component: true
  enable_process_component: true
  prometheus_scrape_interval: 15s
  servername_tag: "HomeAssistant"
  enable_loki: false
  loki_endpoint: http://loki:3100/api/v1/push
  enable_loki_syslog: false
  override_config: false
  override_config_path: "/config/alloy/example.alloy"
schema:
  enable_prometheus: bool
  prometheus_write_endpoint: str?
  enable_unix_component: bool
  enable_process_component: bool
  prometheus_scrape_interval: list(15s|30s|60s)
  servername_tag: str?
  instance_tag: str?
  enable_loki: bool
  loki_endpoint: str?
  enable_loki_syslog: bool
  override_config: bool
  override_config_path: str?

BIN grafana_alloy/icon.png Normal file (binary file not shown; after: 56 KiB)

Binary file not shown. (After: 392 KiB)
Binary file not shown. (After: 379 KiB)
Binary file not shown. (After: 287 KiB)

BIN grafana_alloy/logo.png Normal file (binary file not shown; after: 56 KiB)

View File

@@ -0,0 +1,10 @@
$PROMETHEUS_CONFIG
$UNIX_CONFIG
$PROCESS_CONFIG
$ALLOY_CONFIG
$LOKI_CONFIG

View File

@@ -0,0 +1,160 @@
#!/usr/bin/env bashio
readonly CONFIG_DIR=/etc/alloy
readonly CONFIG_FILE="${CONFIG_DIR}/config.alloy"
readonly CONFIG_TEMPLATE="${CONFIG_DIR}/config.alloy.template"
if bashio::config.true 'override_config'; then
if bashio::config.is_empty 'override_config_path'; then
bashio::config.require 'override_config_path' "Config override is Enabled, must set override_config_path"
fi
else
# Add Prometheus Write Endpoint
if bashio::config.true 'enable_prometheus'; then
bashio::config.require 'prometheus_write_endpoint' "You need to supply Prometheus write endpoint"
EXTERNAL_LABELS=""
RELABEL_CONFIG=""
# Prometheus Write Endpoint
if bashio::config.has_value 'prometheus_write_endpoint'; then
PROMETHEUS_ENDPOINT="$(bashio::config "prometheus_write_endpoint")"
fi
# Servername External Label
if bashio::config.has_value 'servername_tag'; then
EXTERNAL_LABELS="
external_labels = {
\"servername\" = \"$(bashio::config "servername_tag")\",
}"
fi
# Relabel "instance" tag if configured
if bashio::config.has_value 'instance_tag'; then
RELABEL_CONFIG="
write_relabel_config {
action = \"replace\"
source_labels = [\"instance\"]
target_label = \"instance\"
replacement = \"$(bashio::config "instance_tag")\"
}"
fi
export PROMETHEUS_CONFIG="
prometheus.remote_write \"default\" {
endpoint {
url = \"$PROMETHEUS_ENDPOINT\"
metadata_config {
send_interval = \"$(bashio::config "prometheus_scrape_interval")\"
}
$RELABEL_CONFIG
}
$EXTERNAL_LABELS
}"
## Enable prometheus.exporter.unix
if bashio::config.true 'enable_unix_component'; then
export UNIX_CONFIG="
prometheus.exporter.unix \"node_exporter\" { }
prometheus.scrape \"unix\" {
targets = prometheus.exporter.unix.node_exporter.targets
forward_to = [prometheus.remote_write.default.receiver]
scrape_interval = \"$(bashio::config "prometheus_scrape_interval")\"
}"
fi
## Enable prometheus.exporter.process
if bashio::config.true 'enable_process_component'; then
export PROCESS_CONFIG="
prometheus.exporter.process \"process_exporter\" {
matcher {
name = \"{{.Comm}}\"
cmdline = [\".+\"]
}
}
prometheus.scrape \"process\" {
targets = prometheus.exporter.process.process_exporter.targets
forward_to = [prometheus.remote_write.default.receiver]
scrape_interval = \"$(bashio::config "prometheus_scrape_interval")\"
}"
fi
export ALLOY_CONFIG="
prometheus.exporter.self \"alloy\" { }
prometheus.scrape \"self\" {
targets = prometheus.exporter.self.alloy.targets
forward_to = [prometheus.remote_write.default.receiver]
scrape_interval = \"$(bashio::config "prometheus_scrape_interval")\"
}"
fi
# Add Loki to config if endpoint is supplied
if bashio::config.true 'enable_loki'; then
bashio::config.require 'loki_endpoint' "You need to supply Loki endpoint"
if bashio::config.has_value 'servername_tag'; then
labels="{component = \"loki.source.journal\", servername = \"$(bashio::config "servername_tag")\"}"
else
labels="{component = \"loki.source.journal\"}"
fi
if bashio::config.true 'enable_loki_syslog'; then
syslog_config="
loki.source.syslog \"syslog\" {
listener {
address = \"0.0.0.0:5601\"
labels = { component = \"loki.source.syslog\", protocol = \"tcp\" }
}
listener {
address = \"0.0.0.0:5514\"
protocol = \"udp\"
labels = { component = \"loki.source.syslog\", protocol = \"udp\"}
}
forward_to = [loki.write.endpoint.receiver]
}"
else
syslog_config=""
fi
export LOKI_CONFIG="
loki.relabel \"journal\" {
forward_to = []
rule {
source_labels = [\"__journal__systemd_unit\"]
target_label = \"unit\"
}
rule {
source_labels = [\"__journal__hostname\"]
target_label = \"nodename\"
}
rule {
source_labels = [\"__journal_syslog_identifier\"]
target_label = \"syslog_identifier\"
}
rule {
source_labels = [\"__journal_container_name\"]
target_label = \"container_name\"
}
rule {
action = \"drop\"
source_labels = [\"syslog_identifier\"]
regex = \"audit\"
}
}
loki.source.journal \"read\" {
forward_to = [loki.write.endpoint.receiver]
relabel_rules = loki.relabel.journal.rules
labels = $labels
path = \"/var/log/journal\"
}
$syslog_config
loki.write \"endpoint\" {
endpoint {
url = \"$(bashio::config "loki_endpoint")\"
}
}"
fi
envsubst < $CONFIG_TEMPLATE > $CONFIG_FILE
fi

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env bashio
# ==============================================================================
# Home Assistant Community Add-on: Grafana Alloy
# Runs Grafana Alloy
# ==============================================================================
OVERRIDE_CONFIG=$(bashio::config 'override_config_path')
if bashio::config.false 'override_config'; then
CONFIG_FILE=/etc/alloy/config.alloy
else
CONFIG_FILE=$OVERRIDE_CONFIG
fi
bashio::log.info "Starting Grafana Alloy with ${CONFIG_FILE}"
bashio::log.info "$(cat ${CONFIG_FILE})"
# Run Alloy
exec /usr/local/bin/alloy run --server.http.listen-addr=0.0.0.0:12345 --disable-reporting --storage.path=/data $CONFIG_FILE

5
grafana_alloy/rootfs/run.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/usr/bin/env bashio
/etc/cont-init.d/alloy_setup.sh
exec /etc/services.d/alloy/run

View File

@@ -0,0 +1,38 @@
---
configuration:
  enable_prometheus:
    name: Enable Prometheus Metrics
    description: Enable sending metrics to Prometheus. If enabled, prometheus_write_endpoint is required
  prometheus_write_endpoint:
    name: Prometheus Write Endpoint
    description: Full URL to send metrics to.
  enable_unix_component:
    name: Enable Unix System Metrics
    description: Enables prometheus.exporter.unix component to collect node_exporter metrics
  enable_process_component:
    name: Enable Process Metrics
    description: Enables prometheus.exporter.process component to collect process_exporter metrics
  prometheus_scrape_interval:
    name: Prometheus Scrape Interval
    description: How frequently to scrape the targets of this scrape configuration
  servername_tag:
    name: Servername Tag
    description: servername tag value
  instance_tag:
    name: Instance Tag
    description: Overwrite the default metric "instance" tag
  enable_loki:
    name: Enable Loki
    description: Enable sending logs to Loki. If enabled, loki_endpoint is required
  loki_endpoint:
    name: Loki Endpoint
    description: Full Loki URL to send logs to
  override_config:
    name: Override Config
    description: If enabled, all other options will be ignored and you can supply your own Alloy config
  override_config_path:
    name: Override Config Path
    description: Path to the override Alloy config file
  enable_loki_syslog:
    name: Enable Loki Syslog
    description: Listen for syslog messages over UDP or TCP connections and forward them to Loki

View File

@@ -1,327 +0,0 @@
# Home Assistant Add-on: Promtail
## Install
First add the repository to the add-on store (`https://gitea.bonelle-family.dscloud.biz/francois.bonelle/hassio-repo.git`):
[![Open your Home Assistant instance and show the add add-on repository dialog
with a specific repository URL pre-filled.][add-repo-shield]][add-repo]
Then find Promtail in the store and click install:
[![Open your Home Assistant instance and show the dashboard of a Supervisor add-on.][add-addon-shield]][add-addon]
## Default Setup
By default this addon version of Promtail will tail logs from the systemd
journal. This will include all logs from all addons, supervisor, home assistant,
docker, and the host system itself. It will then ship them to the Loki add-on in
this same repository if you have it installed. No additional configuration is
required if this is the setup you want.
If you adjusted the configuration of the Loki add-on, have a separate Loki
add-on or have other log files you want Promtail to monitor then see below for
the configuration options.
## Configuration
**Note**: _Remember to restart the add-on when the configuration is changed._
Example add-on configuration:
```yaml
client:
  url: http://39bd2704-loki:3100
  username: loki
  password: secret
  cafile: /ssl/ca.pem
additional_scrape_configs: /share/promtail/scrape_configs.yaml
log_level: info
```
**Note**: _This is just an example, don't copy and paste it! Create your own!_
### Option: `client.url` (required)
The URL of the Loki deployment Promtail should ship logs to.
If you use the Loki add-on, this will be `http://39bd2704-loki:3100` (unless you
enabled `ssl`, then change it to `https`). If you use Grafana Cloud then the URL
will look like this: `https://<User>:<Your Grafana.com API Key>@logs-prod-us-central1.grafana.net/api/prom/push`
([see here for more info][grafana-cloud-docs-promtail]).
### Option: `client.username`
The username to use if you require basic auth to connect to your Loki deployment.
### Option: `client.password`
The password for the username you choose if you require basic auth to connect to
your Loki deployment. **Note**: This field is required if `client.username` is
provided.
### Option: `client.cafile`
The CA certificate used to sign Loki's certificate if Loki is using a self-signed
certificate for SSL.
**Note**: _The file MUST be stored in `/ssl/`, which is the default_
### Option: `client.servername`
The servername listed on the certificate Loki is using, if you connect over SSL
by a different URL than what's on Loki's certificate (usually if the certificate
lists a public URL and you're connecting locally).
### Option: `client.certfile`
The absolute path to a certificate for client-authentication if Loki is using
mTLS to authenticate clients.
### Option: `client.keyfile`
The absolute path to the key for the client-authentication certificate if Loki
is using mTLS to authenticate clients. **Note**: This field is required if
`client.certfile` is provided
### Option: `additional_pipeline_stages`
The absolute path to a YAML file with a list of additional pipeline stages to
apply to the [default journal scrape config][addon-default-config]. The primary
use of this is to apply additional processing to logs from particular add-ons
you use if they are noisy or difficult to read.
This file must contain only a YAML list of pipeline stages. They will be added
to the end of the ones already listed. If you don't like the ones listed, use
`skip_default_scrape_config` and `additional_scrape_configs` to write your own
instead. Here's an example of the contents of this file:
```yaml
- match:
    selector: '{container_name="addon_cebe7a76_hassio_google_drive_backup"}'
    stages:
      - multiline:
          firstline: '^\x{001b}'
```
This particular example applies to the [google drive backup addon][addon-google-drive-backup].
It uses the same log format as Home Assistant and outputs the escape character
at the start of each log line for color-coding in terminals. Looking for that
in a multiline stage makes it so tracebacks are included in the same log entry
as the error that caused them for easier readability.
See the [promtail documentation][promtail-doc-stages] for more information on how
to configure pipeline stages.
**Note**: This addon has access to `/ssl`, `/share` and `/config/promtail`. Place
the file in one of these locations, others will not work.
### Option: `skip_default_scrape_config`
Promtail will scrape the `systemd journal` using a pre-defined config you can
find [here][addon-default-config]. If you only want it to look at specific log
files you specify or you don't like the default config and want to adjust it, set
this to `true`. Then the only scrape configs used will be the ones you specify
in the `additional_scrape_configs` file.
**Note**: This addon has access to `/ssl`, `/share` and `/config/promtail`. Place
the file in one of these locations, others will not work.
### Option: `additional_scrape_configs`
The absolute path to a YAML file with a list of additional scrape configs for
Promtail to use. The primary use of this is to point Promtail at additional log
files created by add-ons which don't use `stdout` for all logging. You can also
change how the system journal is scraped by using this in conjunction with
`skip_default_scrape_config`. **Note**: If `skip_default_scrape_config` is `true`
then this field becomes required (otherwise there would be no scrape configs)
The file must contain only a YAML list of scrape configs. Here's an example of
the contents of this file:
```yaml
- job_name: zigbee2mqtt_messages
  pipeline_stages:
  static_configs:
    - targets:
        - localhost
      labels:
        job: zigbee2mqtt_messages
        __path__: /share/zigbee2mqtt/log/**.txt
```
This particular example would cause Promtail to scrape the MQTT logs that the
[Zigbee2MQTT add-on][addon-z2m] writes by default.
Promtail provides a lot of options for configuring scrape configs. See the
documentation on [scrape_configs][promtail-doc-scrape-configs] for more info on
the options available and how to configure them. The documentation also provides
[other examples][promtail-doc-examples] you can use.
I would also recommend reading the [Loki best practices][loki-doc-best-practices]
guide before making custom scrape configs. Pipelines are pretty powerful, but
avoid making too many labels; it does more harm than good. Instead, look into
what [LogQL][logql] can do at the other end.
**Note**: This addon has access to `/ssl`, `/share` and `/config/promtail`. Place
the file in one of these locations, others will not work.
### Port: `9080/tcp`
Promtail exposes an [API][api] on this port. This is primarily used for healthchecks
by the supervisor watchdog, which does not require exposing it on the host. Most
users should leave this option disabled unless you have an external application
doing healthchecks.
For advanced users creating custom scrape configs the other purpose of this API
is to expose metrics created by the [metrics][promtail-doc-metrics] pipeline
stage. Exposing this port would then allow you to read those metrics from another
system on your network.
### Option: `log_level`
The `log_level` option controls the level of log output by the addon and can
be changed to be more or less verbose, which might be useful when you are
dealing with an unknown issue. Possible values are:
- `debug`: Shows detailed debug information.
- `info`: Normal (usually) interesting events.
- `warning`: Exceptional occurrences that are not errors.
- `error`: Runtime errors that do not require immediate action.
Please note that each level automatically includes log messages from a
more severe level, e.g., `debug` also shows `info` messages. By default,
the `log_level` is set to `info`, which is the recommended setting unless
you are troubleshooting.
## PLG Stack (Promtail, Loki and Grafana)
Promtail isn't a standalone application; its job is to find logs, process them,
and ship them to Loki. Most likely you want the full PLG stack:
- Promtail to process and ship logs
- Loki to aggregate and index them
- Grafana to visualize and monitor them
### Loki
The easiest way to get started is to also install the Loki add-on in this same
repository. By default this add-on is set up to collect all logs from the system
journal and ship them over to that add-on. If that's what you want there is no
additional configuration required for either.
[![Open your Home Assistant instance and show the dashboard of a Supervisor add-on.][add-addon-shield]][add-addon-loki]
Alternatively you can deploy Loki somewhere else. Take a look at the
[Loki documentation][loki-doc] for info on setting up a Loki deployment and
connecting Promtail to it.
### Grafana
Once you have Loki and Promtail set up you will probably want to connect to it
from [Grafana][grafana]. The easiest way to do that is to use the
[Grafana community add-on][addon-grafana]. From there you can find Loki in the
list of data sources and configure the connection (see [Loki in Grafana][loki-in-grafana]).
If you did choose to use the Loki add-on you can find additional instructions in
[the add-on's documentation][addon-loki-doc].
[![Open your Home Assistant instance and show the dashboard of a Supervisor add-on.][add-addon-shield]][add-addon-grafana]
### Grafana Cloud
If you prefer, you can also use Grafana's cloud service,
[see here](https://grafana.com/products/cloud/) on how to get started. This
service takes the place of both Loki and Grafana in the PLG stack; you just
point Promtail at it and you're done. To do this, first create an account, then
[review this guide][grafana-cloud-docs-promtail] to see how to connect Promtail
to your account. Essentially it's just a different URL, since the credentials
go in the URL itself.
## Changelog & Releases
This repository keeps a change log using [GitHub's releases][releases]
functionality.
Releases are based on [Semantic Versioning][semver], and use the format
of `MAJOR.MINOR.PATCH`. In a nutshell, the version will be incremented
based on the following:
- `MAJOR`: Incompatible or major changes.
- `MINOR`: Backwards-compatible new features and enhancements.
- `PATCH`: Backwards-compatible bugfixes and package updates.
## Support
Got questions?
You have several ways to get them answered:
- The Home Assistant [Community Forum][forum]. I am
[CentralCommand][forum-centralcommand] there.
- The Home Assistant [Discord Chat Server][discord-ha]. Use the #add-ons channel,
I am CentralCommand#0913 there.
You could also [open an issue here][issue] on GitHub.
## Authors & contributors
The original setup of this repository is by [Mike Degatano][mdegat01].
For a full list of all authors and contributors,
check [the contributor's page][contributors].
## License
MIT License
Copyright (c) 2021-2022 Mike Degatano
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
[add-addon-shield]: https://my.home-assistant.io/badges/supervisor_addon.svg
[add-addon]: https://my.home-assistant.io/redirect/supervisor_addon/?addon=39bd2704_promtail
[add-addon-grafana]: https://my.home-assistant.io/redirect/supervisor_addon/?addon=a0d7b954_grafana
[add-addon-loki]: https://my.home-assistant.io/redirect/supervisor_addon/?addon=39bd2704_loki
[add-repo-shield]: https://my.home-assistant.io/badges/supervisor_add_addon_repository.svg
[add-repo]: https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Fmdegat01%2Fhassio-addons
[addon-default-config]: https://github.com/mdegat01/addon-promtail/blob/main/promtail/rootfs/etc/promtail/default-scrape-config.yaml
[addon-grafana]: https://github.com/hassio-addons/addon-grafana
[addon-google-drive-backup]: https://github.com/sabeechen/hassio-google-drive-backup
[addon-loki-doc]: https://github.com/mdegat01/addon-loki/blob/main/loki/DOCS.md#grafana
[addon-z2m]: https://github.com/zigbee2mqtt/hassio-zigbee2mqtt
[api]: https://grafana.com/docs/loki/latest/clients/promtail/#api
[contributors]: https://github.com/mdegat01/addon-promtail/graphs/contributors
[discord-ha]: https://discord.gg/c5DvZ4e
[forum-centralcommand]: https://community.home-assistant.io/u/CentralCommand/?u=CentralCommand
[forum]: https://community.home-assistant.io/t/home-assistant-add-on-promtail/293732?u=CentralCommand
[grafana]: https://grafana.com/oss/grafana/
[grafana-cloud]: https://grafana.com/products/cloud/
[grafana-cloud-docs-promtail]: https://grafana.com/docs/grafana-cloud/quickstart/logs_promtail_linuxnode/
[issue]: https://github.com/mdegat01/addon-promtail/issues
[logql]: https://grafana.com/docs/loki/latest/logql/
[loki-doc]: https://grafana.com/docs/loki/latest/overview/
[loki-doc-best-practices]: https://grafana.com/docs/loki/latest/best-practices/
[loki-in-grafana]: https://grafana.com/docs/loki/latest/getting-started/grafana/
[mdegat01]: https://github.com/mdegat01
[promtail-doc-examples]: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#example-static-config
[promtail-doc-metrics]: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#metrics
[promtail-doc-scrape-configs]: https://grafana.com/docs/loki/latest/clients/promtail/configuration/#scrape_configs
[promtail-doc-stages]: https://grafana.com/docs/loki/latest/clients/promtail/stages/
[releases]: https://github.com/mdegat01/addon-promtail/releases
[semver]: http://semver.org/spec/v2.0.0

View File

@@ -1,67 +0,0 @@
ARG BUILD_FROM
FROM ${BUILD_FROM}
# Build arguments
ARG BUILD_ARCH
ARG BUILD_DATE
ARG BUILD_DESCRIPTION
ARG BUILD_NAME
ARG BUILD_REF
ARG BUILD_REPOSITORY
ARG BUILD_VERSION
ARG YQ_VERSION
ARG PROMTAIL_VERSION
# Add dependencies, yq, and promtail
RUN set -eux; \
apt-get update; \
apt-get install -qy --no-install-recommends \
tar \
unzip \
libsystemd-dev \
; \
update-ca-certificates; \
case "${BUILD_ARCH}" in \
amd64) BINARCH='amd64' ;; \
armhf) BINARCH='arm' ;; \
armv7) BINARCH='arm' ;; \
aarch64) BINARCH='arm64' ;; \
*) echo >&2 "error: unsupported architecture (${APKARCH})"; exit 1 ;; \
esac; \
curl -s -J -L -o /tmp/yq.tar.gz \
"https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_linux_${BINARCH}.tar.gz"; \
tar -xf /tmp/yq.tar.gz -C /usr/bin; \
mv /usr/bin/yq_linux_${BINARCH} /usr/bin/yq; \
chmod a+x /usr/bin/yq; \
rm /tmp/yq.tar.gz; \
yq --version; \
apt-get clean; \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*; \
curl -s -J -L -o /tmp/promtail.zip \
"https://github.com/grafana/loki/releases/download/v${PROMTAIL_VERSION}/promtail-linux-${BINARCH}.zip"; \
unzip /tmp/promtail.zip -d /usr/bin; \
mv /usr/bin/promtail-linux-${BINARCH} /usr/bin/promtail; \
chmod +x /usr/bin/promtail; \
rm /tmp/promtail.zip; \
mkdir -p /data/promtail; \
rm -rf /var/lib/apt/lists/*
COPY rootfs /
WORKDIR /data/promtail
# Labels
LABEL \
io.hass.name="${BUILD_NAME}" \
io.hass.description="${BUILD_DESCRIPTION}" \
io.hass.arch="${BUILD_ARCH}" \
io.hass.type="addon" \
io.hass.version=${BUILD_VERSION} \
maintainer="fbonelle" \
org.opencontainers.image.title="${BUILD_NAME}" \
org.opencontainers.image.description="${BUILD_DESCRIPTION}" \
org.opencontainers.image.vendor="fbonelle's addons" \
org.opencontainers.image.authors="fbonelle" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.created=${BUILD_DATE} \
org.opencontainers.image.revision=${BUILD_REF} \
org.opencontainers.image.version=${BUILD_VERSION}

View File

@@ -1,123 +0,0 @@
include <tunables/global>
# Docker overlay
@{docker_root}=/docker/ /var/lib/docker/
@{fs_root}=/ @{docker_root}/overlay2/*/diff/
@{do_etc}=@{fs_root}/etc/
@{do_opt}=@{fs_root}/opt/
@{do_run}=@{fs_root}/{run,var/run}/
@{do_usr}=@{fs_root}/usr/
@{do_var}=@{fs_root}/var/
# Systemd Journal location
@{journald}=/{run,var}/log/journal/{,**}
profile hassio_promtail flags=(attach_disconnected,mediate_deleted) {
include <abstractions/base>
include <abstractions/bash>
# Send signals to child services
signal (send) peer=@{profile_name}//*,
# Network access
network tcp,
network udp,
# S6-Overlay
/init rix,
/bin/** rix,
@{do_usr}/bin/** rix,
@{do_usr}/sbin/** rix,
@{do_usr}/share/{,**} r,
@{do_usr}/lib/locale/{,**} r,
@{do_etc}/* rw,
@{do_etc}/s6*/** r,
@{do_etc}/fix-attrs.d/{,**} r,
@{do_etc}/cont-{init,finish}.d/{,**} rwix,
@{do_etc}/services.d/{,**} rwix,
@{do_etc}/ssl/openssl.cnf r,
@{do_etc}/{group,hosts,passwd} r,
@{do_etc}/{host,nsswitch,resolv}.conf r,
@{do_run}/{s6,s6-rc*,service}/** rix,
@{do_run}/{,**} rwk,
/var/cache/{,**} rw,
/dev/tty rw,
/dev/null k,
/command/** rix,
/package/** rix,
# Bashio
@{do_usr}/lib/bashio/** ix,
/tmp/** rw,
# Options.json & addon data
/data r,
/data/** rw,
# Files needed for setup
@{do_etc}/promtail/{,**} rw,
/config/promtail/{,**} r,
/{share,ssl}/{,**} r,
@{journald} r,
# Programs
/usr/bin/promtail cx -> promtail_profile,
/usr/bin/yq Cx -> yq_profile,
/usr/sbin/dpkg-reconfigure Cx -> dpkg_reconfigure_profile,
profile promtail_profile flags=(attach_disconnected,mediate_deleted) {
include <abstractions/base>
# Receive signals from s6
signal (receive) peer=*_promtail,
# Network access
network tcp,
network udp,
network netlink raw,
network unix dgram,
# Temp files
/tmp/.positions.yaml* rw,
# Addon data
/data/** r,
/data/promtail/** rwk,
# Config & log data
@{do_etc}/promtail/config.yaml r,
/config/promtail/{,**} r,
/{share,ssl}/** r,
@{journald} r,
# Runtime usage
/usr/bin/promtail rm,
@{do_etc}/{hosts,passwd} r,
@{do_etc}/{resolv,nsswitch}.conf r,
@{PROC}/sys/net/core/somaxconn r,
@{sys}/kernel/mm/transparent_hugepage/hpage_pmd_size r,
/dev/null k,
@{do_etc}/ssl/certs/** r,
}
profile yq_profile flags=(attach_disconnected,mediate_deleted) {
include <abstractions/base>
# Config files
@{do_etc}/promtail/* rw,
/config/promtail/{,**} r,
/share/** r,
# Runtime usage
/usr/bin/yq rm,
@{sys}/kernel/mm/transparent_hugepage/hpage_pmd_size r,
/dev/null k,
}
profile dpkg_reconfigure_profile flags=(attach_disconnected,mediate_deleted) {
include <abstractions/base>
/** rwlkmix,
}
}

View File

@@ -1,10 +0,0 @@
---
build_from:
  aarch64: ghcr.io/hassio-addons/debian-base:7.3.5
  amd64: ghcr.io/hassio-addons/debian-base:7.3.5
  armhf: ghcr.io/hassio-addons/debian-base:7.3.5
  armv7: ghcr.io/hassio-addons/debian-base:7.3.5
  i386: ghcr.io/hassio-addons/debian-base:7.3.5
args:
  YQ_VERSION: 4.44.2
  PROMTAIL_VERSION: 3.1.0

View File

@@ -1,39 +0,0 @@
---
name: Promtail
url: https://gitea.bonelle-family.dscloud.biz/francois.bonelle/hassio-repo.git
version: 3.1.0
slug: hassio_promtail
arch:
  - aarch64
  - amd64
  - armv7
  - armhf
description: Promtail for Home Assistant
init: false
journald: true
map:
  - config
  - share
  - ssl
watchdog: http://[HOST]:[PORT:9080]/ready
ports:
  9080/tcp:
ports_description:
  9080/tcp: Promtail web server
options:
  client:
    url: http://loki:3100/loki/api/v1/push
  log_level: info
schema:
  client:
    url: str
    username: str?
    password: password?
    cafile: str?
    servername: str?
    certfile: str?
    keyfile: str?
  additional_pipeline_stages: str?
  additional_scrape_configs: str?
  skip_default_scrape_config: bool?
  log_level: list(trace|debug|info|notice|warning|error|fatal)?

Binary file not shown. (Before: 20 KiB)
Binary file not shown. (Before: 15 KiB)

View File

@@ -1,129 +0,0 @@
#!/usr/bin/with-contenv bashio
# shellcheck shell=bash
# ==============================================================================
# Home Assistant Add-on: Promtail
# This file makes the config file from inputs
# ==============================================================================
readonly CONFIG_DIR=/etc/promtail
readonly CONFIG_FILE="${CONFIG_DIR}/config.yaml"
readonly BASE_CONFIG="${CONFIG_DIR}/base_config.yaml"
readonly DEF_SCRAPE_CONFIGS="${CONFIG_DIR}/default-scrape-config.yaml"
readonly CUSTOM_SCRAPE_CONFIGS="${CONFIG_DIR}/custom-scrape-config.yaml"
declare cafile
declare add_stages
declare add_scrape_configs
scrape_configs="${DEF_SCRAPE_CONFIGS}"
bashio::log.info 'Setting base config for promtail...'
cp "${BASE_CONFIG}" "${CONFIG_FILE}"
# Set up client section
if ! bashio::config.is_empty 'client.username'; then
bashio::log.info 'Adding basic auth to client config...'
bashio::config.require 'client.password' "'client.username' is specified"
{
echo " basic_auth:"
echo " username: $(bashio::config 'client.username')"
echo " password: $(bashio::config 'client.password')"
} >> "${CONFIG_FILE}"
fi
if ! bashio::config.is_empty 'client.cafile'; then
bashio::log.info "Adding TLS to client config..."
cafile="/ssl/$(bashio::config 'client.cafile')"
if ! bashio::fs.file_exists "${cafile}"; then
bashio::log.fatal
bashio::log.fatal "The file specified for 'cafile' does not exist!"
bashio::log.fatal "Ensure the CA certificate file exists and full path is provided"
bashio::log.fatal
bashio::exit.nok
fi
{
echo " tls_config:"
echo " ca_file: ${cafile}"
} >> "${CONFIG_FILE}"
if ! bashio::config.is_empty 'client.servername'; then
echo " server_name: $(bashio::config 'client.servername')" >> "${CONFIG_FILE}"
fi
if ! bashio::config.is_empty 'client.certfile'; then
bashio::log.info "Adding mTLS to client config..."
bashio::config.require 'client.keyfile' "'client.certfile' is specified"
if ! bashio::fs.file_exists "$(bashio::config 'client.certfile')"; then
bashio::log.fatal
bashio::log.fatal "The file specified for 'certfile' does not exist!"
bashio::log.fatal "Ensure the certificate file exists and full path is provided"
bashio::log.fatal
bashio::exit.nok
fi
if ! bashio::fs.file_exists "$(bashio::config 'client.keyfile')"; then
bashio::log.fatal
bashio::log.fatal "The file specified for 'keyfile' does not exist!"
bashio::log.fatal "Ensure the key file exists and full path is provided"
bashio::log.fatal
bashio::exit.nok
fi
{
echo " cert_file: $(bashio::config 'client.certfile')"
echo " key_file: $(bashio::config 'client.keyfile')"
} >> "${CONFIG_FILE}"
fi
fi
# Add in scrape configs
{
echo
echo "scrape_configs:"
} >> "${CONFIG_FILE}"
if bashio::config.true 'skip_default_scrape_config'; then
bashio::log.info 'Skipping default journald scrape config...'
if ! bashio::config.is_empty 'additional_pipeline_stages'; then
bashio::log.warning
bashio::log.warning "'additional_pipeline_stages' ignored since 'skip_default_scrape_config' is true!"
bashio::log.warning 'See documentation for more information.'
bashio::log.warning
fi
bashio::config.require 'additional_scrape_configs' "'skip_default_scrape_config' is true"
elif ! bashio::config.is_empty 'additional_pipeline_stages'; then
bashio::log.info "Adding additional pipeline stages to default journal scrape config..."
add_stages="$(bashio::config 'additional_pipeline_stages')"
scrape_configs="${CUSTOM_SCRAPE_CONFIGS}"
if ! bashio::fs.file_exists "${add_stages}"; then
bashio::log.fatal
bashio::log.fatal "The file specified for 'additional_pipeline_stages' does not exist!"
bashio::log.fatal "Ensure the file exists at the path specified"
bashio::log.fatal
bashio::exit.nok
fi
yq -NP eval-all \
'select(fi == 0) + [{"add_pipeline_stages": select(fi == 1)}]' \
"${DEF_SCRAPE_CONFIGS}" "${add_stages}" \
| yq -NP e \
'[(.[0] * .[1]) | {"job_name": .job_name, "journal": .journal, "relabel_configs": .relabel_configs, "pipeline_stages": .pipeline_stages + .add_pipeline_stages}]' \
- > "${scrape_configs}"
fi
if ! bashio::config.is_empty 'additional_scrape_configs'; then
bashio::log.info "Adding custom scrape configs..."
add_scrape_configs="$(bashio::config 'additional_scrape_configs')"
if ! bashio::fs.file_exists "${add_scrape_configs}"; then
bashio::log.fatal
bashio::log.fatal "The file specified for 'additional_scrape_configs' does not exist!"
bashio::log.fatal "Ensure the file exists at the path specified"
bashio::log.fatal
bashio::exit.nok
fi
if bashio::config.true 'skip_default_scrape_config'; then
yq -NP e '[] + .' "${add_scrape_configs}" >> "${CONFIG_FILE}"
else
yq -NP eval-all 'select(fi == 0) + select(fi == 1)' \
"${scrape_configs}" "${add_scrape_configs}" >> "${CONFIG_FILE}"
fi
else
yq -NP e '[] + .' "${scrape_configs}" >> "${CONFIG_FILE}"
fi

View File

@@ -1,11 +0,0 @@
---
server:
  http_listen_port: 9080
  grpc_listen_port: 0
  log_level: "${LOG_LEVEL}"
positions:
  filename: /data/promtail/positions.yaml
clients:
  - url: "${URL}"

View File

@@ -1,27 +0,0 @@
---
- job_name: journal
  journal:
    json: false
    max_age: 12h
    labels:
      job: systemd-journal
    path: "${JOURNAL_PATH}"
  relabel_configs:
    - source_labels:
        - __journal__systemd_unit
      target_label: unit
    - source_labels:
        - __journal__hostname
      target_label: nodename
    - source_labels:
        - __journal_syslog_identifier
      target_label: syslog_identifier
    - source_labels:
        - __journal_container_name
      target_label: container_name
  pipeline_stages:
    - match:
        selector: '{container_name=~"homeassistant|hassio_supervisor"}'
        stages:
          - multiline:
              firstline: '^\x{001b}'

View File

@@ -1,15 +0,0 @@
#!/usr/bin/env bashio
# ==============================================================================
# Take down the S6 supervision tree when Promtail fails
# s6-overlay docs: https://github.com/just-containers/s6-overlay
# ==============================================================================
declare APP_EXIT_CODE=${1}
if [[ "${APP_EXIT_CODE}" -ne 0 ]] && [[ "${APP_EXIT_CODE}" -ne 256 ]]; then
bashio::log.warning "Halt add-on with exit code ${APP_EXIT_CODE}"
echo "${APP_EXIT_CODE}" > /run/s6-linux-init-container-results/exitcode
exec /run/s6/basedir/bin/halt
fi
bashio::log.info "Service restart after closing"

View File

@@ -1,40 +0,0 @@
#!/usr/bin/with-contenv bashio
# shellcheck shell=bash
# ==============================================================================
# Home Assistant Add-on: Promtail
# Runs Promtail
# ==============================================================================
readonly PROMTAIL_CONFIG='/etc/promtail/config.yaml'
declare log_level
bashio::log.info 'Starting Promtail...'
journal_path='/var/log/journal'
if ! bashio::fs.directory_exists "${journal_path}" || [ -z "$(ls -A ${journal_path})" ]; then
bashio::log.info "No journal at ${journal_path}, looking for journal in /run/log/journal instead"
journal_path='/run/log/journal'
fi
case "$(bashio::config 'log_level')" in \
trace) ;& \
debug) log_level='debug' ;; \
notice) ;& \
warning) log_level='warn' ;; \
error) ;& \
fatal) log_level='error' ;; \
*) log_level='info' ;; \
esac;
bashio::log.info "Promtail log level set to ${log_level}"
export "URL=$(bashio::config 'client.url')"
export "JOURNAL_PATH=${journal_path}"
export "LOG_LEVEL=${log_level}"
promtail_args=("-config.expand-env=true" "-config.file=${PROMTAIL_CONFIG}")
if [ "${log_level}" == "debug" ]; then
bashio::log.debug "Logging full config on startup for debugging..."
promtail_args+=("-print-config-stderr=true")
fi
bashio::log.info "Handing over control to Promtail..."
/usr/bin/promtail "${promtail_args[@]}"

View File

@@ -1,67 +0,0 @@
ARG BUILD_FROM
FROM ${BUILD_FROM}
# Build arguments
ARG BUILD_ARCH
ARG BUILD_DATE
ARG BUILD_DESCRIPTION
ARG BUILD_NAME
ARG BUILD_REF
ARG BUILD_REPOSITORY
ARG BUILD_VERSION
ARG TELEGRAF_VERSION
# Environment variables
ENV \
HOME="/root" \
LANG="C.UTF-8" \
PS1="$(whoami)@$(hostname):$(pwd)$ " \
TERM="xterm-256color"
# Copy root filesystem
COPY rootfs /
# Set shell
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends bash ca-certificates curl jq tzdata xz-utils iputils-ping snmp procps lm-sensors libcap2-bin wget gnupg && \
c_rehash && \
rm -rf /var/lib/apt/lists/*
ENV TELEGRAF_VERSION ${TELEGRAF_VERSION}
RUN set -ex && \
mkdir ~/.gnupg && \
chmod 600 ~/.gnupg/* && \
chmod 700 ~/.gnupg && \
echo "disable-ipv6" >> ~/.gnupg/dirmngr.conf; \
wget -q https://repos.influxdata.com/influxdata-archive_compat.key && \
echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null && \
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | tee /etc/apt/sources.list.d/influxdata.list && \
apt-get update && \
apt-get install telegraf=${TELEGRAF_VERSION} && \
rm -rf /var/lib/apt/lists/*
EXPOSE 8125/udp 8092/udp 8094
COPY entrypoint.sh /entrypoint.sh
COPY settings.sh /settings.sh
ENTRYPOINT ["/entrypoint.sh"]
CMD ["telegraf"]
# Labels
LABEL \
io.hass.name="${BUILD_NAME}" \
io.hass.description="${BUILD_DESCRIPTION}" \
io.hass.arch="${BUILD_ARCH}" \
io.hass.type="addon" \
io.hass.version=${BUILD_VERSION} \
maintainer="fbonelle" \
org.opencontainers.image.title="${BUILD_NAME}" \
org.opencontainers.image.description="${BUILD_DESCRIPTION}" \
org.opencontainers.image.vendor="fbonelle's addons" \
org.opencontainers.image.authors="fbonelle" \
org.opencontainers.image.licenses="MIT" \
org.opencontainers.image.created=${BUILD_DATE} \
org.opencontainers.image.revision=${BUILD_REF} \
org.opencontainers.image.version=${BUILD_VERSION}

View File

@@ -1,8 +0,0 @@
build_from:
  aarch64: ghcr.io/hassio-addons/debian-base:7.3.5
  amd64: ghcr.io/hassio-addons/debian-base:7.3.5
  armhf: ghcr.io/hassio-addons/debian-base:7.3.5
  armv7: ghcr.io/hassio-addons/debian-base:7.3.5
  i386: ghcr.io/hassio-addons/debian-base:7.3.5
args:
  TELEGRAF_VERSION: 1.31.1-1

View File

@@ -1,39 +0,0 @@
---
name: Telegraf
version: 1.31.1-1
slug: hassio_telegraf
description: An addon to add telegraf to hassio.
url: https://gitea.bonelle-family.dscloud.biz/francois.bonelle/hassio-repo.git
init: false
arch:
  - aarch64
  - amd64
  - armhf
  - armv7
  - i386
ports:
  9273/tcp: 9273
hassio_api: true
hassio_role: default
host_network: true
auth_api: true
privileged:
  - SYS_ADMIN
apparmor: false
map:
  - config:rw
  - ssl:rw
  - addons:rw
  - backup:rw
  - share:rw
startup: services
boot: manual
docker_api: true
host_pid: true
full_access: true
options:
  custom_conf:
    location: "/config/telegraf.conf"
schema:
  custom_conf:
    location: str

View File

@@ -1,17 +0,0 @@
#!/bin/bash
set -e
bashio /settings.sh
if [ "${1:0:1}" = '-' ]; then
set -- telegraf "$@"
fi
if [ $EUID -ne 0 ]; then
exec "$@"
else
# Allow telegraf to send ICMP packets and bind to privileged ports
setcap cap_net_raw,cap_net_bind_service+ep /usr/bin/telegraf || echo "Failed to set additional capabilities on /usr/bin/telegraf"
exec setpriv --reuid root --init-groups "$@"
fi

Binary file not shown. (Before: 18 KiB)
Binary file not shown. (Before: 17 KiB)

View File

@@ -1,37 +0,0 @@
#!/command/with-contenv bashio
# ==============================================================================
# Home Assistant Community Add-on: Base Images
# Displays a simple add-on banner on startup
# ==============================================================================
if bashio::supervisor.ping; then
bashio::log.blue \
'-----------------------------------------------------------'
bashio::log.blue " Add-on: $(bashio::addon.name)"
bashio::log.blue " $(bashio::addon.description)"
bashio::log.blue \
'-----------------------------------------------------------'
bashio::log.blue " Add-on version: $(bashio::addon.version)"
if bashio::var.true "$(bashio::addon.update_available)"; then
bashio::log.magenta ' There is an update available for this add-on!'
bashio::log.magenta \
" Latest add-on version: $(bashio::addon.version_latest)"
bashio::log.magenta ' Please consider upgrading as soon as possible.'
else
bashio::log.green ' You are running the latest version of this add-on.'
fi
bashio::log.blue " System: $(bashio::info.operating_system)" \
" ($(bashio::info.arch) / $(bashio::info.machine))"
bashio::log.blue " Home Assistant Core: $(bashio::info.homeassistant)"
bashio::log.blue " Home Assistant Supervisor: $(bashio::info.supervisor)"
bashio::log.blue \
'-----------------------------------------------------------'
bashio::log.blue \
' Please, share the above information when looking for help'
bashio::log.blue \
' or support in, e.g., GitHub, forums or the Discord chat.'
bashio::log.blue \
'-----------------------------------------------------------'
fi

View File

@@ -1,46 +0,0 @@
#!/command/with-contenv bashio
# ==============================================================================
# Home Assistant Community Add-on: Base Images
# Sets the log level correctly
# ==============================================================================
declare log_level
# Check if the log level configuration option exists
if bashio::config.exists log_level; then
# Find the matching LOG_LEVEL
log_level=$(bashio::string.lower "$(bashio::config log_level)")
case "${log_level}" in
all)
log_level="${__BASHIO_LOG_LEVEL_ALL}"
;;
trace)
log_level="${__BASHIO_LOG_LEVEL_TRACE}"
;;
debug)
log_level="${__BASHIO_LOG_LEVEL_DEBUG}"
;;
info)
log_level="${__BASHIO_LOG_LEVEL_INFO}"
;;
notice)
log_level="${__BASHIO_LOG_LEVEL_NOTICE}"
;;
warning)
log_level="${__BASHIO_LOG_LEVEL_WARNING}"
;;
error)
log_level="${__BASHIO_LOG_LEVEL_ERROR}"
;;
fatal)
log_level="${__BASHIO_LOG_LEVEL_FATAL}"
;;
off)
log_level="${__BASHIO_LOG_LEVEL_OFF}"
;;
*)
bashio::exit.nok "Unknown log_level: ${log_level}"
esac
bashio::log.blue "Log level is set to ${__BASHIO_LOG_LEVELS[$log_level]}"
fi

View File

@@ -1,11 +0,0 @@
#!/command/with-contenv bashio
# ==============================================================================
# Home Assistant Community Add-on: Base Images
# Configures the timezone
# ==============================================================================
if ! bashio::var.is_empty "${TZ}"; then
bashio::log.info "Configuring timezone"
ln --symbolic --no-dereference --force "/usr/share/zoneinfo/${TZ}" /etc/localtime
echo "${TZ}" > /etc/timezone
fi

View File

@@ -1,13 +0,0 @@
#!/usr/bin/env bashio
declare hostname
bashio::require.unprotected
readonly CONFIG="/etc/telegraf/telegraf.conf"
CUSTOM_CONF=$(bashio::config 'custom_conf.location')
bashio::log.info "Using custom conf file"
rm /etc/telegraf/telegraf.conf
cp "${CUSTOM_CONF}" /etc/telegraf/telegraf.conf
bashio::log.info "Finished updating config, Starting Telegraf"