Compare commits
stirling-p ... main

4 commits (SHA1):
0c4eed7709
f20697a85c
99241d0061
16ff239ea9

21 changed files with 127 additions and 877 deletions
|
@@ -10,10 +10,6 @@ services:
|
||||||
environment:
|
environment:
|
||||||
- INIT_ASSETS=1
|
- INIT_ASSETS=1
|
||||||
labels:
|
labels:
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.homer.rule=Host(`dashboard.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.homer.entrypoints=web"
|
|
||||||
- "traefik.http.services.homer.loadbalancer.server.port=8080"
|
|
||||||
- "docker.group=dashboard"
|
- "docker.group=dashboard"
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
|
||||||
|
|
101
doc/manual.md
|
@@ -1,61 +1,78 @@
|
||||||
# Manual and Help
|
# Manual and Help
|
||||||
|
|
||||||
## Upgrade PostgreSQL to a newer Version (e.g. 15 to 16)
|
## Upgrade PostgreSQL to a newer Version (e.g. 15 to 16)
|
||||||
|
[Source](https://helgeklein.com/blog/upgrading-postgresql-in-docker-container/)
|
||||||
|
|
||||||
|
### 1. Stop the Application Container
|
||||||
### 1. Copy Service
|
|
||||||
|
|
||||||
1.1 Copy whole service definition in docker-compose.yaml
|
|
||||||
1.2 Rename old service to *-old
|
|
||||||
1.3 Change the data path of the new service, e.g. to postgres16
|
|
||||||
1.4 Set postgres version explicit to new version
|
|
||||||
|
|
||||||
Example:
|
|
||||||
```
|
```
|
||||||
databasedb-old:
|
docker compose down APP_CONTAINER_NAME
|
||||||
image: postgres:${POSTGRES_VERSION}
|
|
||||||
volumes:
|
|
||||||
- ${VOLUMES_PATH}/databasedb:/var/lib/postgresql/data
|
|
||||||
environment:
|
|
||||||
- POSTGRES_DB=${POSTGRES_DB}
|
|
||||||
- POSTGRES_USER=${POSTGRES_USER}
|
|
||||||
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
|
|
||||||
|
|
||||||
databasedb:
|
|
||||||
image: postgres:16
|
|
||||||
volumes:
|
|
||||||
- ${VOLUMES_PATH}/databasedb16:/var/lib/postgresql/data
|
|
||||||
environment:
|
|
||||||
- POSTGRES_DB=${POSTGRES_DB}
|
|
||||||
- POSTGRES_USER=${POSTGRES_USER}
|
|
||||||
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### 2. Move data
|
|
||||||
Back up the data from the old service to the new service with the following command:
|
### 2. Create a Full DB Dump
|
||||||
|
Look up the name of your PostgreSQL user in your Docker configuration; it is often simply postgres.
|
||||||
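If you are unsure which user is configured, one way to check (a sketch; POSTGRESQL_SERVICE_NAME is the placeholder used throughout this guide, and the docker-compose.yaml/.env paths are assumptions):

```
# check the compose file and .env on the host
grep POSTGRES_USER docker-compose.yaml .env
# or ask the running container directly
docker compose exec POSTGRESQL_SERVICE_NAME env | grep POSTGRES_USER
```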
|
|
||||||
```
|
```
|
||||||
docker exec old-service pg_dumpall -U dbuser | docker exec -i new-service psql -U dbuser -d database
|
docker exec -i POSTGRESQL_CONTAINER_NAME pg_dumpall -U postgres > dump.sql
|
||||||
```
|
```
|
||||||
|
|
||||||
### 3. Set password
|
### 3. Stop the PostgreSQL Container
|
||||||
```
|
```
|
||||||
docker exec -i monitoring-databasedb-1 psql -U dbuser -d database -c "ALTER USER \"dbuser\" PASSWORD 'secret';"
|
docker stop POSTGRESQL_CONTAINER_NAME # without compose
|
||||||
|
docker compose stop POSTGRESQL_SERVICE_NAME # with compose
|
||||||
|
docker compose down # all
|
||||||
```
|
```
|
||||||
|
|
||||||
### 4. Test
|
### 4. Move the DB Data Directory
|
||||||
```docker compose up -d``` and check if service is correctly running.
|
Use root privileges to move the data directory, keeping the old one as a backup.
|
||||||
|
```
|
||||||
|
mv db/ db-old/
|
||||||
|
mkdir db
|
||||||
|
```
|
||||||
|
|
||||||
### 5. Cleanup
|
### 5. Increment the PostgreSQL Version
|
||||||
5.1 Remove old service in docker-compose.yaml
|
Edit the Docker compose file, incrementing the image version.
|
||||||
5.2 Set explicit version again to ${POSTGRES_VERSION} and adopt .env file
|
If the image is set to postgres:${POSTGRES_VERSION}, change the version in the .env file instead.
|
||||||
5.3 Remove the old volume directory
|
|
||||||
|
|
||||||
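As an illustration of step 5, assuming the image line references ${POSTGRES_VERSION} as in the compose files of this repository, the bump can be made in the .env file and checked with docker compose config (a sketch, not the exact commands used here):

```
# docker-compose.yaml keeps: image: postgres:${POSTGRES_VERSION}
sed -i 's/^POSTGRES_VERSION=.*/POSTGRES_VERSION=16/' .env
# verify that the rendered configuration picks up the new version
docker compose config | grep 'image: postgres'
```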
|
### 6. Start container with empty data directory
|
||||||
|
Start the container and verify the logs:
|
||||||
|
```
|
||||||
|
docker compose up -d POSTGRESQL_CONTAINER_NAME
|
||||||
|
docker compose logs -f
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### 7. Import DB Dump
|
||||||
|
Import the dump into the new PostgreSQL container.
|
||||||
|
Use this:
|
||||||
|
```
|
||||||
|
docker compose exec -T POSTGRESQL_SERVICE_NAME psql -U POSTGRES_USER POSTGRES_DB < dump.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
### 8. Set Password
|
||||||
|
```
|
||||||
|
docker exec -i POSTGRESQL_CONTAINER_NAME psql -U POSTGRES_USER -d database -c "ALTER USER \"POSTGRES_USER\" PASSWORD 'POSTGRES_PASSWORD';"
|
||||||
|
```
|
||||||
|
|
||||||
|
### 9. Start the Application Container
|
||||||
|
```
|
||||||
|
docker compose up -d APP_CONTAINER_NAME
|
||||||
|
```
|
||||||
|
|
||||||
|
### 10. Test
|
||||||
|
Check if service is working correctly.
|
||||||
|
Check logs:
|
||||||
|
```
|
||||||
|
docker compose logs -f
|
||||||
|
```
|
||||||
|
|
||||||
|
### 11. Clean Up
|
||||||
|
* Delete the backup directory: `rm -rf db-old/`
|
||||||
|
* Delete the dump file: `rm dump.sql`
|
||||||
|
* Delete the old PostgreSQL image: `docker image prune -a` (all three commands are collected below)
|
||||||
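Taken together, and assuming the db-old/ backup directory and dump.sql file created in the earlier steps, the cleanup boils down to:

```
rm -rf db-old/         # backup copy of the old data directory
rm dump.sql            # the full dump created in step 2
docker image prune -a  # removes all unused images, including the old postgres one
```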
|
|
||||||
|
|
||||||
### 6. Maybe move the data dir
|
|
||||||
6.1. ```docker compose down```
|
|
||||||
6.2 ```mv /mnt/dockervolumes/databasedb16 /mnt/dockervolumes/databasedb```
|
|
||||||
6.3 Adjust docker-compose.yaml
|
|
||||||
6.4 ```docker compose up -d```
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@@ -1,6 +1,6 @@
|
||||||
services:
|
services:
|
||||||
|
|
||||||
app:
|
pyload:
|
||||||
image: linuxserver/pyload-ng
|
image: linuxserver/pyload-ng
|
||||||
environment:
|
environment:
|
||||||
- PUID=1000
|
- PUID=1000
|
||||||
|
@@ -9,16 +9,9 @@ services:
|
||||||
volumes:
|
volumes:
|
||||||
- ${VOLUMES_PATH}/download/pyload:/config
|
- ${VOLUMES_PATH}/download/pyload:/config
|
||||||
- ${DOWNLOAD_PATH}:/downloads
|
- ${DOWNLOAD_PATH}:/downloads
|
||||||
# ports:
|
|
||||||
#- 7227:7227 #optional Thrift Backend
|
|
||||||
networks:
|
networks:
|
||||||
- web
|
- web
|
||||||
labels:
|
labels:
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.pyload.rule=Host(`download.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.pyload.entrypoints=web"
|
|
||||||
- "traefik.http.services.pyload.loadbalancer.server.port=8000"
|
|
||||||
- "docker.group=selfhost"
|
|
||||||
- "docker.group=download"
|
- "docker.group=download"
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
|
||||||
|
|
|
@@ -28,16 +28,4 @@ services:
|
||||||
# - /dev/dri/renderD128:/dev/dri/renderD128
|
# - /dev/dri/renderD128:/dev/dri/renderD128
|
||||||
# - /dev/dri/card0:/dev/dri/card0
|
# - /dev/dri/card0:/dev/dri/card0
|
||||||
labels:
|
labels:
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.jellyfin.rule=Host(`jellyfin.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.jellyfin.entrypoints=web"
|
|
||||||
- "traefik.http.services.jellyfin.loadbalancer.server.port=8096"
|
|
||||||
- 'traefik.http.middlewares.jellyfin-mw.headers.customResponseHeaders.X-Robots-Tag=noindex,nofollow,nosnippet,noarchive,notranslate,noimageindex'
|
|
||||||
- 'traefik.http.middlewares.jellyfin-mw.headers.frameDeny=true'
|
|
||||||
- 'traefik.http.middlewares.jellyfin-mw.headers.contentTypeNosniff=true'
|
|
||||||
- 'traefik.http.middlewares.jellyfin-mw.headers.customresponseheaders.X-XSS-PROTECTION=1'
|
|
||||||
- 'traefik.http.middlewares.jellyfin-mw.headers.browserXSSFilter=true'
|
|
||||||
- "traefik.http.middlewares.jellyfin-mw.headers.customFrameOptionsValue='allow-from http://jellyfin.${DOMAIN}'"
|
|
||||||
- "traefik.http.middlewares.jellyfin-mw.headers.customrequestheaders.X-Forwarded-Proto: https"
|
|
||||||
- 'traefik.http.routers.jellyfin.middlewares=jellyfin-mw'
|
|
||||||
- "docker.group=media"
|
- "docker.group=media"
|
||||||
|
|
|
@@ -1,94 +1,15 @@
|
||||||
services:
|
services:
|
||||||
|
|
||||||
grafana:
|
|
||||||
image: grafana/grafana:${GRAFANA_VERSION}
|
|
||||||
restart: unless-stopped
|
|
||||||
expose:
|
|
||||||
- "3000"
|
|
||||||
networks:
|
|
||||||
- web
|
|
||||||
- monitoring
|
|
||||||
labels:
|
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.monitoring.rule=Host(`monitoring.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.monitoring.entrypoints=web"
|
|
||||||
- "traefik.http.services.monitoring.loadbalancer.server.port=3000"
|
|
||||||
- "traefik.docker.network=web"
|
|
||||||
- "docker.group=monitoring"
|
|
||||||
environment:
|
|
||||||
- GF_DEFAULT_INSTANCE_NAME=monitoring.${DOMAIN}
|
|
||||||
- GF_SERVER_ROOT_URL=http://monitoring.${DOMAIN}
|
|
||||||
- GF_SERVER_DOMAIN=monitoring.${DOMAIN}
|
|
||||||
- GF_SERVER_SERVE_FROM_SUB_PATH=true
|
|
||||||
- GF_SECURITY_DISABLE_GRAVATAR=true
|
|
||||||
- GF_AUTH_ANONYMOUS_ENABLED=true
|
|
||||||
- GF_AUTH_ANONYMOUS_ORG_ROLE=Viewer
|
|
||||||
- GF_DATABASE_TYPE=postgres
|
|
||||||
- GF_DATABASE_HOST=grafanadb:5432
|
|
||||||
- GF_DATABASE_SSL_MODE=disable
|
|
||||||
- GF_DATABASE_NAME=grafana
|
|
||||||
- GF_DATABASE_USER=${POSTGRES_USER}
|
|
||||||
- GF_DATABASE_PASSWORD=${POSTGRES_PASSWORD}
|
|
||||||
- GF_INSTALL_PLUGINS=flant-statusmap-panel
|
|
||||||
depends_on:
|
|
||||||
- influxdb
|
|
||||||
- grafanadb
|
|
||||||
|
|
||||||
|
|
||||||
grafanadb:
|
|
||||||
image: postgres:${POSTGRES_VERSION}
|
|
||||||
volumes:
|
|
||||||
- ${VOLUMES_PATH}/monitoring/grafanadb:/var/lib/postgresql/data
|
|
||||||
networks:
|
|
||||||
- monitoring
|
|
||||||
restart: unless-stopped
|
|
||||||
environment:
|
|
||||||
- POSTGRES_DB=grafana
|
|
||||||
- POSTGRES_USER=${POSTGRES_USER}
|
|
||||||
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
|
|
||||||
labels:
|
|
||||||
- "docker.group=monitoring"
|
|
||||||
|
|
||||||
|
|
||||||
influxdb:
|
|
||||||
image: influxdb:${INFLUXDB_VERSION}
|
|
||||||
restart: unless-stopped
|
|
||||||
networks:
|
|
||||||
- web
|
|
||||||
- monitoring
|
|
||||||
environment:
|
|
||||||
- INFLUXDB_MONITOR_STORE_ENABLED=false
|
|
||||||
volumes:
|
|
||||||
- ${VOLUMES_PATH}/monitoring/influxdb/:/var/lib/influxdb
|
|
||||||
- ${PWD}/influxdb.conf:/etc/influxdb/influxdb.conf:ro
|
|
||||||
labels:
|
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.influxdb.rule=Host(`influxdb.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.influxdb.entrypoints=web"
|
|
||||||
- "traefik.http.services.influxdb.loadbalancer.server.port=8086"
|
|
||||||
- "traefik.docker.network=web"
|
|
||||||
- "docker.group=monitoring"
|
|
||||||
|
|
||||||
|
|
||||||
uptime-kuma:
|
uptime-kuma:
|
||||||
image: louislam/uptime-kuma:1
|
image: louislam/uptime-kuma:1
|
||||||
volumes:
|
volumes:
|
||||||
- ${VOLUMES_PATH}/monitoring/uptime-kuma-data:/app/data
|
- ${VOLUMES_PATH}/monitoring/uptime-kuma-data:/app/data
|
||||||
networks:
|
networks:
|
||||||
- web
|
- web
|
||||||
- dockersocket
|
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
labels:
|
labels:
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.uptimekuma.rule=Host(`uptime.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.uptimekuma.entrypoints=web"
|
|
||||||
- "traefik.http.services.uptimekuma.loadbalancer.server.port=3001"
|
|
||||||
- "traefik.docker.network=web"
|
|
||||||
- "docker.group=monitoring"
|
- "docker.group=monitoring"
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
monitoring:
|
|
||||||
web:
|
web:
|
||||||
external: true
|
external: true
|
||||||
dockersocket:
|
|
||||||
external: true
|
|
||||||
|
|
|
@@ -1,12 +0,0 @@
|
||||||
[meta]
|
|
||||||
dir = "/var/lib/influxdb/meta"
|
|
||||||
|
|
||||||
[data]
|
|
||||||
dir = "/var/lib/influxdb/data"
|
|
||||||
wal-dir = "/var/lib/influxdb/wal"
|
|
||||||
max-concurrent-compactions = 1
|
|
||||||
|
|
||||||
[monitor]
|
|
||||||
store-enabled = false
|
|
||||||
store-database = "_internal"
|
|
||||||
store-interval = "10s"
|
|
|
@@ -1 +0,0 @@
|
||||||
docker run --rm --link=monitoring_influxdb_1 -it --net monitoring_monitoring influxdb:1.8 influx -host influxdb
|
|
|
@@ -59,11 +59,6 @@ services:
|
||||||
- USERMAP_GID=1000
|
- USERMAP_GID=1000
|
||||||
- PAPERLESS_PRE_CONSUME_SCRIPT=/usr/src/paperless/scripts/removePdfPassword.py
|
- PAPERLESS_PRE_CONSUME_SCRIPT=/usr/src/paperless/scripts/removePdfPassword.py
|
||||||
labels:
|
labels:
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.paperless.rule=Host(`paperless.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.paperless.entrypoints=web"
|
|
||||||
- "traefik.http.services.paperless.loadbalancer.server.port=8000"
|
|
||||||
- "traefik.docker.network=web"
|
|
||||||
- "docker.group=paperless"
|
- "docker.group=paperless"
|
||||||
|
|
||||||
|
|
||||||
|
|
44
proxy/Caddyfile
Normal file
|
@@ -0,0 +1,44 @@
|
||||||
|
{
|
||||||
|
auto_https off
|
||||||
|
}
|
||||||
|
|
||||||
|
http://whoami.lan {
|
||||||
|
reverse_proxy whoami:80
|
||||||
|
}
|
||||||
|
|
||||||
|
http://dashboard.lan {
|
||||||
|
reverse_proxy homer:8080
|
||||||
|
}
|
||||||
|
|
||||||
|
http://hassi.lan {
|
||||||
|
# reverse_proxy homeassistant:8123
|
||||||
|
reverse_proxy dockerhost-1.lan:8123
|
||||||
|
}
|
||||||
|
|
||||||
|
http://zigbee2mqtt.lan {
|
||||||
|
reverse_proxy zigbee2mqtt:8080
|
||||||
|
}
|
||||||
|
|
||||||
|
http://jellyfin.lan {
|
||||||
|
reverse_proxy jellyfin:8096
|
||||||
|
}
|
||||||
|
|
||||||
|
http://paperless.lan {
|
||||||
|
reverse_proxy paperless-ngx:8000
|
||||||
|
}
|
||||||
|
|
||||||
|
http://download.lan {
|
||||||
|
reverse_proxy pyload:8000
|
||||||
|
}
|
||||||
|
|
||||||
|
http://uptime.lan {
|
||||||
|
reverse_proxy uptime-kuma:3001
|
||||||
|
}
|
||||||
|
|
||||||
|
http://torrent.lan {
|
||||||
|
reverse_proxy transmission:9091
|
||||||
|
}
|
||||||
|
|
||||||
|
:80, :443 {
|
||||||
|
respond 404
|
||||||
|
}
|
|
@@ -1,54 +1,25 @@
|
||||||
services:
|
services:
|
||||||
|
|
||||||
traefik:
|
caddy:
|
||||||
image: traefik:${TRAEFIK_VERSION}
|
image: caddy:2
|
||||||
restart: always
|
restart: unless-stopped
|
||||||
ports:
|
ports:
|
||||||
- "80:80"
|
- 80:80
|
||||||
- "443:443"
|
volumes:
|
||||||
|
- ${VOLUMES_PATH}/proxy/caddy/data:/data
|
||||||
|
- ${VOLUMES_PATH}/proxy/caddy/config:/config
|
||||||
|
- ./Caddyfile:/etc/caddy/Caddyfile:ro
|
||||||
networks:
|
networks:
|
||||||
- web
|
- web
|
||||||
- dockersocket
|
|
||||||
volumes:
|
|
||||||
- "$PWD/traefik.yml:/etc/traefik/traefik.yml"
|
|
||||||
- "$PWD/extraProviders/:/extraProviders:ro"
|
|
||||||
labels:
|
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.traefik.rule=Host(`traefik.${HOSTNAME}`)"
|
|
||||||
- "traefik.http.routers.traefik.entrypoints=web"
|
|
||||||
- "traefik.http.services.traefik.loadbalancer.server.port=8080"
|
|
||||||
- "docker.group=proxy"
|
|
||||||
extra_hosts:
|
|
||||||
- host.docker.internal:172.17.0.1
|
|
||||||
depends_on:
|
|
||||||
- docker-socket-proxy
|
|
||||||
|
|
||||||
|
|
||||||
docker-socket-proxy:
|
whoami:
|
||||||
image: tecnativa/docker-socket-proxy
|
image: containous/whoami
|
||||||
restart: unless-stopped
|
|
||||||
volumes:
|
|
||||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
|
||||||
environment:
|
|
||||||
# grant privileges as environment variables: https://github.com/Tecnativa/docker-socket-proxy#grant-or-revoke-access-to-certain-api-sections
|
|
||||||
- CONTAINERS=1
|
|
||||||
- INFO=1
|
|
||||||
networks:
|
networks:
|
||||||
- dockersocket
|
- web
|
||||||
privileged: true
|
labels:
|
||||||
|
- "docker.group=proxy"
|
||||||
|
restart: unless-stopped
|
||||||
# whoami:
|
|
||||||
# image: containous/whoami
|
|
||||||
# networks:
|
|
||||||
# - web
|
|
||||||
# labels:
|
|
||||||
# - "traefik.enable=true"
|
|
||||||
# - "traefik.http.routers.whoami.rule=Host(`whoami.${HOSTNAME}`)"
|
|
||||||
# - "traefik.http.routers.whoami.entrypoints=web"
|
|
||||||
# - "traefik.http.services.whoami.loadbalancer.server.port=80"
|
|
||||||
# - "docker.group=proxy"
|
|
||||||
# restart: unless-stopped
|
|
||||||
|
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
|
|
|
@@ -1,23 +0,0 @@
|
||||||
debug: true
|
|
||||||
checkNewVersion: true
|
|
||||||
logLevel: INFO
|
|
||||||
#defaultEntryPoints: ["https","http"]
|
|
||||||
defaultEntryPoints: [http]
|
|
||||||
|
|
||||||
api:
|
|
||||||
insecure: true
|
|
||||||
|
|
||||||
providers:
|
|
||||||
docker:
|
|
||||||
exposedbydefault: false
|
|
||||||
endpoint: "tcp://docker-socket-proxy:2375"
|
|
||||||
|
|
||||||
file:
|
|
||||||
directory: /extraProviders
|
|
||||||
|
|
||||||
|
|
||||||
entryPoints:
|
|
||||||
web:
|
|
||||||
address: ':80'
|
|
||||||
|
|
||||||
|
|
|
@@ -10,25 +10,23 @@ services:
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
network_mode: host
|
network_mode: host
|
||||||
labels:
|
labels:
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.hassi.rule=Host(`hassi.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.hassi.entrypoints=web"
|
|
||||||
- "traefik.http.services.hassi.loadbalancer.server.port=8123"
|
|
||||||
- "docker.group=smartHome"
|
- "docker.group=smartHome"
|
||||||
|
|
||||||
|
|
||||||
mqttbroker:
|
mqttbroker:
|
||||||
image: eclipse-mosquitto:${MOSQUITTO_VERSION}
|
image: eclipse-mosquitto:${MOSQUITTO_VERSION}
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
network_mode: host
|
networks:
|
||||||
expose:
|
- smarthome
|
||||||
- 1883
|
ports:
|
||||||
#ports:
|
- 1883:1883 # mqtt over TCP
|
||||||
# - 1883:1883 # mqtt over TCP
|
|
||||||
# - 9001:9001 # Websockets
|
# - 9001:9001 # Websockets
|
||||||
volumes:
|
volumes:
|
||||||
- ./mosquitto.conf:/mosquitto/config/mosquitto.conf
|
- ./mosquitto.conf:/mosquitto/config/mosquitto.conf
|
||||||
- ${VOLUMES_PATH}/smartHome/mosquitto/data:/mosquitto/data
|
- ${VOLUMES_PATH}/smartHome/mosquitto/data:/mosquitto/data
|
||||||
- ${VOLUMES_PATH}/smartHome/mosquitto/log:/mosquitto/log
|
- ${VOLUMES_PATH}/smartHome/mosquitto/log:/mosquitto/log
|
||||||
|
labels:
|
||||||
|
- "docker.group=smartHome"
|
||||||
|
|
||||||
|
|
||||||
zigbee2mqtt:
|
zigbee2mqtt:
|
||||||
|
@@ -43,14 +41,12 @@ services:
|
||||||
- ${ZIGBEE_DEVICE}:/dev/ttyACM0
|
- ${ZIGBEE_DEVICE}:/dev/ttyACM0
|
||||||
networks:
|
networks:
|
||||||
- web
|
- web
|
||||||
|
- smarthome
|
||||||
labels:
|
labels:
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.zigbee2mqtt.rule=Host(`zigbee2mqtt.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.zigbee2mqtt.entrypoints=web"
|
|
||||||
- "traefik.http.services.zigbee2mqtt.loadbalancer.server.port=8080"
|
|
||||||
- "docker.group=smartHome"
|
- "docker.group=smartHome"
|
||||||
|
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
web:
|
web:
|
||||||
external: true
|
external: true
|
||||||
|
smarthome:
|
||||||
|
|
|
@@ -7,7 +7,6 @@ function up {
|
||||||
|
|
||||||
up proxy;
|
up proxy;
|
||||||
up monitoring;
|
up monitoring;
|
||||||
up telegraf;
|
|
||||||
up smartHome;
|
up smartHome;
|
||||||
up dashboard;
|
up dashboard;
|
||||||
up download;
|
up download;
|
||||||
|
|
10
start-min.sh
|
@@ -1,10 +0,0 @@
|
||||||
#/bin/bash/
|
|
||||||
|
|
||||||
|
|
||||||
function up {
|
|
||||||
(cd "$1" && echo "[$1]" && docker compose up -d "${@:2}");
|
|
||||||
}
|
|
||||||
|
|
||||||
up proxy;
|
|
||||||
up telegraf;
|
|
||||||
|
|
|
@@ -1,37 +0,0 @@
|
||||||
services:
|
|
||||||
|
|
||||||
host:
|
|
||||||
image: telegraf:${TELEGRAF_VERSION}
|
|
||||||
restart: unless-stopped
|
|
||||||
environment:
|
|
||||||
- HOST_MOUNT_PREFIX=/hostfs
|
|
||||||
- HOST_PROC=/hostfs/proc
|
|
||||||
- HOST_SYS=/hostfs/sys
|
|
||||||
- HOST_ETC=/hostfs/etc
|
|
||||||
- HOST_VAR=/hostfs/var
|
|
||||||
- HOST_RUN=/hostfs/run
|
|
||||||
volumes:
|
|
||||||
- ./telegraf_host.conf:/etc/telegraf/telegraf.conf:ro
|
|
||||||
- /var/run/utmp:/var/run/utmp:ro
|
|
||||||
- /:/hostfs:ro
|
|
||||||
network_mode: "host"
|
|
||||||
labels:
|
|
||||||
- "docker.group=telegraf"
|
|
||||||
|
|
||||||
net:
|
|
||||||
image: telegraf:${TELEGRAF_VERSION}
|
|
||||||
restart: unless-stopped
|
|
||||||
volumes:
|
|
||||||
- ./telegraf_net.conf:/etc/telegraf/telegraf.conf:ro
|
|
||||||
networks:
|
|
||||||
- dockersocket
|
|
||||||
labels:
|
|
||||||
- "docker.group=telegraf"
|
|
||||||
dns:
|
|
||||||
- ${LOCAL_DNS_SERVER}
|
|
||||||
environment:
|
|
||||||
- "HOST_NAME=${HOST_NAME}"
|
|
||||||
|
|
||||||
networks:
|
|
||||||
dockersocket:
|
|
||||||
external: true
|
|
|
@@ -1,12 +0,0 @@
|
||||||
[meta]
|
|
||||||
dir = "/var/lib/influxdb/meta"
|
|
||||||
|
|
||||||
[data]
|
|
||||||
dir = "/var/lib/influxdb/data"
|
|
||||||
wal-dir = "/var/lib/influxdb/wal"
|
|
||||||
max-concurrent-compactions = 1
|
|
||||||
|
|
||||||
[monitor]
|
|
||||||
store-enabled = false
|
|
||||||
store-database = "_internal"
|
|
||||||
store-interval = "10s"
|
|
|
@@ -1,369 +0,0 @@
|
||||||
# Telegraf Configuration
|
|
||||||
#
|
|
||||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
|
||||||
# declared inputs, and sent to the declared outputs.
|
|
||||||
#
|
|
||||||
# Plugins must be declared in here to be active.
|
|
||||||
# To deactivate a plugin, comment out the name and any variables.
|
|
||||||
#
|
|
||||||
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
|
|
||||||
# file would generate.
|
|
||||||
#
|
|
||||||
# Environment variables can be used anywhere in this config file, simply surround
|
|
||||||
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
|
|
||||||
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
|
|
||||||
|
|
||||||
# Config Sample under https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf
|
|
||||||
|
|
||||||
# Global tags can be specified here in key="value" format.
|
|
||||||
[global_tags]
|
|
||||||
# datacenter
|
|
||||||
dc="fzirker.lan"
|
|
||||||
|
|
||||||
# Configuration for telegraf agent
|
|
||||||
[agent]
|
|
||||||
## Default data collection interval for all inputs
|
|
||||||
interval = "10s"
|
|
||||||
## Rounds collection interval to 'interval'
|
|
||||||
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
|
||||||
round_interval = true
|
|
||||||
|
|
||||||
## Telegraf will send metrics to outputs in batches of at most
|
|
||||||
## metric_batch_size metrics.
|
|
||||||
## This controls the size of writes that Telegraf sends to output plugins.
|
|
||||||
metric_batch_size = 1000
|
|
||||||
|
|
||||||
## Maximum number of unwritten metrics per output. Increasing this value
|
|
||||||
## allows for longer periods of output downtime without dropping metrics at the
|
|
||||||
## cost of higher maximum memory usage.
|
|
||||||
metric_buffer_limit = 10000
|
|
||||||
|
|
||||||
## Collection jitter is used to jitter the collection by a random amount.
|
|
||||||
## Each plugin will sleep for a random time within jitter before collecting.
|
|
||||||
## This can be used to avoid many plugins querying things like sysfs at the
|
|
||||||
## same time, which can have a measurable effect on the system.
|
|
||||||
collection_jitter = "0s"
|
|
||||||
|
|
||||||
## Default flushing interval for all outputs. Maximum flush_interval will be
|
|
||||||
## flush_interval + flush_jitter
|
|
||||||
flush_interval = "10s"
|
|
||||||
## Jitter the flush interval by a random amount. This is primarily to avoid
|
|
||||||
## large write spikes for users running a large number of telegraf instances.
|
|
||||||
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
|
||||||
flush_jitter = "0s"
|
|
||||||
|
|
||||||
## By default or when set to "0s", precision will be set to the same
|
|
||||||
## timestamp order as the collection interval, with the maximum being 1s.
|
|
||||||
## ie, when interval = "10s", precision will be "1s"
|
|
||||||
## when interval = "250ms", precision will be "1ms"
|
|
||||||
## Precision will NOT be used for service inputs. It is up to each individual
|
|
||||||
## service input to set the timestamp at the appropriate precision.
|
|
||||||
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
|
|
||||||
precision = ""
|
|
||||||
|
|
||||||
## Override default hostname, if empty use os.Hostname()
|
|
||||||
hostname = ""
|
|
||||||
## If set to true, do no set the "host" tag in the telegraf agent.
|
|
||||||
omit_hostname = false
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
# OUTPUT PLUGINS #
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
# Configuration for sending metrics to InfluxDB
|
|
||||||
[[outputs.influxdb]]
|
|
||||||
## The full HTTP or UDP URL for your InfluxDB instance.
|
|
||||||
##
|
|
||||||
## Multiple URLs can be specified for a single cluster, only ONE of the
|
|
||||||
## urls will be written to each interval.
|
|
||||||
# urls = ["unix:///var/run/influxdb.sock"]
|
|
||||||
# urls = ["udp://127.0.0.1:8089"]
|
|
||||||
# urls = ["http://127.0.0.1:8086"]
|
|
||||||
urls = ["http://influxdb.lan"] # required
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
# INPUT PLUGINS #
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
|
|
||||||
# Read metrics about cpu usage
|
|
||||||
[[inputs.cpu]]
|
|
||||||
## Whether to report per-cpu stats or not
|
|
||||||
percpu = true
|
|
||||||
## Whether to report total system cpu stats or not
|
|
||||||
totalcpu = true
|
|
||||||
## If true, collect raw CPU time metrics.
|
|
||||||
collect_cpu_time = false
|
|
||||||
## If true, compute and report the sum of all non-idle CPU states.
|
|
||||||
report_active = false
|
|
||||||
|
|
||||||
|
|
||||||
# Read metrics about disk usage by mount point
|
|
||||||
[[inputs.disk]]
|
|
||||||
## By default stats will be gathered for all mount points.
|
|
||||||
## Set mount_points will restrict the stats to only the specified mount points.
|
|
||||||
mount_points = ["/hostfs", "/hostfs/mnt/sdcard"]
|
|
||||||
|
|
||||||
## Ignore mount points by filesystem type.
|
|
||||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
|
|
||||||
|
|
||||||
[[inputs.disk]]
|
|
||||||
# let the hard disk sleep as long as possible :)
|
|
||||||
interval = "12h"
|
|
||||||
mount_points = ["/hostfs/mnt/wdhdd"]
|
|
||||||
|
|
||||||
|
|
||||||
# Read metrics about disk IO by device
|
|
||||||
[[inputs.diskio]]
|
|
||||||
## By default, telegraf will gather stats for all devices including
|
|
||||||
## disk partitions.
|
|
||||||
## Setting devices will restrict the stats to the specified devices.
|
|
||||||
# devices = ["sda", "sdb", "vd*"]
|
|
||||||
## Uncomment the following line if you need disk serial numbers.
|
|
||||||
# skip_serial_number = false
|
|
||||||
#
|
|
||||||
## On systems which support it, device metadata can be added in the form of
|
|
||||||
## tags.
|
|
||||||
## Currently only Linux is supported via udev properties. You can view
|
|
||||||
## available properties for a device by running:
|
|
||||||
## 'udevadm info -q property -n /dev/sda'
|
|
||||||
## Note: Most, but not all, udev properties can be accessed this way. Properties
|
|
||||||
## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
|
|
||||||
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
|
|
||||||
#
|
|
||||||
## Using the same metadata source as device_tags, you can also customize the
|
|
||||||
## name of the device via templates.
|
|
||||||
## The 'name_templates' parameter is a list of templates to try and apply to
|
|
||||||
## the device. The template may contain variables in the form of '$PROPERTY' or
|
|
||||||
## '${PROPERTY}'. The first template which does not contain any variables not
|
|
||||||
## present for the device is used as the device name tag.
|
|
||||||
## The typical use case is for LVM volumes, to get the VG/LV name instead of
|
|
||||||
## the near-meaningless DM-0 name.
|
|
||||||
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
|
|
||||||
|
|
||||||
|
|
||||||
# Get kernel statistics from /proc/stat
|
|
||||||
[[inputs.kernel]]
|
|
||||||
# no configuration
|
|
||||||
|
|
||||||
|
|
||||||
# Read metrics about memory usage
|
|
||||||
[[inputs.mem]]
|
|
||||||
# no configuration
|
|
||||||
|
|
||||||
|
|
||||||
# Get the number of processes and group them by status
|
|
||||||
[[inputs.processes]]
|
|
||||||
# no configuration
|
|
||||||
|
|
||||||
|
|
||||||
# Read metrics about swap memory usage
|
|
||||||
[[inputs.swap]]
|
|
||||||
# no configuration
|
|
||||||
|
|
||||||
|
|
||||||
# Read metrics about system load & uptime
|
|
||||||
[[inputs.system]]
|
|
||||||
## Uncomment to remove deprecated metrics.
|
|
||||||
# fielddrop = ["uptime_format"]
|
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics about network interface usage
|
|
||||||
[[inputs.net]]
|
|
||||||
## By default, telegraf gathers stats from any up interface (excluding loopback)
|
|
||||||
## Setting interfaces will tell it to gather these explicit interfaces,
|
|
||||||
## regardless of status.
|
|
||||||
##
|
|
||||||
interfaces = ["enp2s0"]
|
|
||||||
##
|
|
||||||
## On linux systems telegraf also collects protocol stats.
|
|
||||||
## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
|
|
||||||
##
|
|
||||||
# ignore_protocol_stats = false
|
|
||||||
##
|
|
||||||
|
|
||||||
|
|
||||||
# # Read TCP metrics such as established, time wait and sockets counts.
|
|
||||||
[[inputs.netstat]]
|
|
||||||
# no configuration
|
|
||||||
|
|
||||||
|
|
||||||
# Collect kernel snmp counters and network interface statistics
|
|
||||||
[[inputs.nstat]]
|
|
||||||
## file paths for proc files. If empty default paths will be used:
|
|
||||||
## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
|
|
||||||
## These can also be overridden with env variables, see README.
|
|
||||||
proc_net_netstat = "/proc/net/netstat"
|
|
||||||
proc_net_snmp = "/proc/net/snmp"
|
|
||||||
proc_net_snmp6 = "/proc/net/snmp6"
|
|
||||||
## dump metrics with 0 values too
|
|
||||||
dump_zeros = true
|
|
||||||
|
|
||||||
|
|
||||||
# # Monitor process cpu and memory usage
|
|
||||||
# [[inputs.procstat]]
|
|
||||||
# ## PID file to monitor process
|
|
||||||
# pid_file = "/var/run/nginx.pid"
|
|
||||||
# ## executable name (ie, pgrep <exe>)
|
|
||||||
# # exe = "nginx"
|
|
||||||
# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
|
|
||||||
# # pattern = "nginx"
|
|
||||||
# ## user as argument for pgrep (ie, pgrep -u <user>)
|
|
||||||
# # user = "nginx"
|
|
||||||
# ## Systemd unit name
|
|
||||||
# # systemd_unit = "nginx.service"
|
|
||||||
# ## CGroup name or path
|
|
||||||
# # cgroup = "systemd/system.slice/nginx.service"
|
|
||||||
#
|
|
||||||
# ## Windows service name
|
|
||||||
# # win_service = ""
|
|
||||||
#
|
|
||||||
# ## override for process_name
|
|
||||||
# ## This is optional; default is sourced from /proc/<pid>/status
|
|
||||||
# # process_name = "bar"
|
|
||||||
#
|
|
||||||
# ## Field name prefix
|
|
||||||
# # prefix = ""
|
|
||||||
#
|
|
||||||
# ## When true add the full cmdline as a tag.
|
|
||||||
# # cmdline_tag = false
|
|
||||||
#
|
|
||||||
# ## Add PID as a tag instead of a field; useful to differentiate between
|
|
||||||
# ## processes whose tags are otherwise the same. Can create a large number
|
|
||||||
# ## of series, use judiciously.
|
|
||||||
# # pid_tag = false
|
|
||||||
#
|
|
||||||
# ## Method to use when finding process IDs. Can be one of 'pgrep', or
|
|
||||||
# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
|
|
||||||
# ## the native finder performs the search directly in a manor dependent on the
|
|
||||||
# ## platform. Default is 'pgrep'
|
|
||||||
# # pid_finder = "pgrep"
|
|
||||||
|
|
||||||
|
|
||||||
# # Read metrics from storage devices supporting S.M.A.R.T.
|
|
||||||
# [[inputs.smart]]
|
|
||||||
# ## Optionally specify the path to the smartctl executable
|
|
||||||
# # path = "/usr/bin/smartctl"
|
|
||||||
#
|
|
||||||
# ## On most platforms smartctl requires root access.
|
|
||||||
# ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
|
|
||||||
# ## Sudo must be configured to to allow the telegraf user to run smartctl
|
|
||||||
# ## without a password.
|
|
||||||
# # use_sudo = false
|
|
||||||
#
|
|
||||||
# ## Skip checking disks in this power mode. Defaults to
|
|
||||||
# ## "standby" to not wake up disks that have stoped rotating.
|
|
||||||
# ## See --nocheck in the man pages for smartctl.
|
|
||||||
# ## smartctl version 5.41 and 5.42 have faulty detection of
|
|
||||||
# ## power mode and might require changing this value to
|
|
||||||
# ## "never" depending on your disks.
|
|
||||||
# # nocheck = "standby"
|
|
||||||
#
|
|
||||||
# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
|
|
||||||
# ## information from each drive into the 'smart_attribute' measurement.
|
|
||||||
# # attributes = false
|
|
||||||
#
|
|
||||||
# ## Optionally specify devices to exclude from reporting.
|
|
||||||
# # excludes = [ "/dev/pass6" ]
|
|
||||||
#
|
|
||||||
# ## Optionally specify devices and device type, if unset
|
|
||||||
# ## a scan (smartctl --scan) for S.M.A.R.T. devices will
|
|
||||||
# ## done and all found will be included except for the
|
|
||||||
# ## excluded in excludes.
|
|
||||||
# # devices = [ "/dev/ada0 -d atacam" ]
|
|
||||||
#
|
|
||||||
# ## Timeout for the smartctl command to complete.
|
|
||||||
# # timeout = "30s"
|
|
||||||
|
|
||||||
|
|
||||||
# # Sysstat metrics collector
|
|
||||||
# [[inputs.sysstat]]
|
|
||||||
# ## Path to the sadc command.
|
|
||||||
# #
|
|
||||||
# ## Common Defaults:
|
|
||||||
# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
|
|
||||||
# ## Arch: /usr/lib/sa/sadc
|
|
||||||
# ## RHEL/CentOS: /usr/lib64/sa/sadc
|
|
||||||
# sadc_path = "/usr/lib/sa/sadc" # required
|
|
||||||
#
|
|
||||||
# ## Path to the sadf command, if it is not in PATH
|
|
||||||
# # sadf_path = "/usr/bin/sadf"
|
|
||||||
#
|
|
||||||
# ## Activities is a list of activities, that are passed as argument to the
|
|
||||||
# ## sadc collector utility (e.g: DISK, SNMP etc...)
|
|
||||||
# ## The more activities that are added, the more data is collected.
|
|
||||||
# # activities = ["DISK"]
|
|
||||||
#
|
|
||||||
# ## Group metrics to measurements.
|
|
||||||
# ##
|
|
||||||
# ## If group is false each metric will be prefixed with a description
|
|
||||||
# ## and represents itself a measurement.
|
|
||||||
# ##
|
|
||||||
# ## If Group is true, corresponding metrics are grouped to a single measurement.
|
|
||||||
# # group = true
|
|
||||||
#
|
|
||||||
# ## Options for the sadf command. The values on the left represent the sadf
|
|
||||||
# ## options and the values on the right their description (which are used for
|
|
||||||
# ## grouping and prefixing metrics).
|
|
||||||
# ##
|
|
||||||
# ## Run 'sar -h' or 'man sar' to find out the supported options for your
|
|
||||||
# ## sysstat version.
|
|
||||||
# [inputs.sysstat.options]
|
|
||||||
# -C = "cpu"
|
|
||||||
# -B = "paging"
|
|
||||||
# -b = "io"
|
|
||||||
# -d = "disk" # requires DISK activity
|
|
||||||
# "-n ALL" = "network"
|
|
||||||
# "-P ALL" = "per_cpu"
|
|
||||||
# -q = "queue"
|
|
||||||
# -R = "mem"
|
|
||||||
# -r = "mem_util"
|
|
||||||
# -S = "swap_util"
|
|
||||||
# -u = "cpu_util"
|
|
||||||
# -v = "inode"
|
|
||||||
# -W = "swap"
|
|
||||||
# -w = "task"
|
|
||||||
# # -H = "hugepages" # only available for newer linux distributions
|
|
||||||
# # "-I ALL" = "interrupts" # requires INT activity
|
|
||||||
#
|
|
||||||
# ## Device tags can be used to add additional tags for devices.
|
|
||||||
# ## For example the configuration below adds a tag vg with value rootvg for
|
|
||||||
# ## all metrics with sda devices.
|
|
||||||
# # [[inputs.sysstat.device_tags.sda]]
|
|
||||||
# # vg = "rootvg"
|
|
||||||
|
|
||||||
|
|
||||||
# Gather systemd units state
|
|
||||||
# [[inputs.systemd_units]]
|
|
||||||
# ## Set timeout for systemctl execution
|
|
||||||
# # timeout = "1s"
|
|
||||||
# #
|
|
||||||
# ## Filter for a specific unit type, default is "service", other possible
|
|
||||||
# ## values are "socket", "target", "device", "mount", "automount", "swap",
|
|
||||||
# ## "timer", "path", "slice" and "scope ":
|
|
||||||
# unittype = "service"
|
|
||||||
|
|
||||||
# # Read metrics about temperature
|
|
||||||
[[inputs.temp]]
|
|
||||||
# no configuration
|
|
||||||
|
|
||||||
|
|
||||||
# # Reads metrics from a SSL certificate
|
|
||||||
#[[inputs.x509_cert]]
|
|
||||||
## List certificate sources
|
|
||||||
#sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
|
|
||||||
#sources = ["https://florianzirker.de:443", "https://cloud.florianzirker.de:443", "https://wallabag.florianzirker.de:443", "https://gitea.florianzirker.de:443", "https://meet.florianzirker.de:443", "https://www.feuerwehr-kapsweyer.de:443"]
|
|
||||||
|
|
||||||
## Timeout for SSL connection
|
|
||||||
# timeout = "5s"
|
|
||||||
|
|
||||||
## Pass a different name into the TLS request (Server Name Indication)
|
|
||||||
## example: server_name = "myhost.example.org"
|
|
||||||
# server_name = ""
|
|
||||||
|
|
||||||
## Optional TLS Config
|
|
||||||
# tls_ca = "/etc/telegraf/ca.pem"
|
|
||||||
# tls_cert = "/etc/telegraf/cert.pem"
|
|
||||||
# tls_key = "/etc/telegraf/key.pem"
|
|
|
@@ -1,155 +0,0 @@
|
||||||
# Telegraf Configuration
|
|
||||||
#
|
|
||||||
# Telegraf is entirely plugin driven. All metrics are gathered from the
|
|
||||||
# declared inputs, and sent to the declared outputs.
|
|
||||||
#
|
|
||||||
# Plugins must be declared in here to be active.
|
|
||||||
# To deactivate a plugin, comment out the name and any variables.
|
|
||||||
#
|
|
||||||
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
|
|
||||||
# file would generate.
|
|
||||||
#
|
|
||||||
# Environment variables can be used anywhere in this config file, simply surround
|
|
||||||
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
|
|
||||||
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
|
|
||||||
|
|
||||||
# Config Sample under https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf
|
|
||||||
|
|
||||||
# Global tags can be specified here in key="value" format.
|
|
||||||
[global_tags]
|
|
||||||
# datacenter
|
|
||||||
dc="fzirker.lan"
|
|
||||||
|
|
||||||
# Configuration for telegraf agent
|
|
||||||
[agent]
|
|
||||||
## Default data collection interval for all inputs
|
|
||||||
interval = "10s"
|
|
||||||
## Rounds collection interval to 'interval'
|
|
||||||
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
|
|
||||||
round_interval = true
|
|
||||||
|
|
||||||
## Telegraf will send metrics to outputs in batches of at most
|
|
||||||
## metric_batch_size metrics.
|
|
||||||
## This controls the size of writes that Telegraf sends to output plugins.
|
|
||||||
metric_batch_size = 1000
|
|
||||||
|
|
||||||
## Maximum number of unwritten metrics per output. Increasing this value
|
|
||||||
## allows for longer periods of output downtime without dropping metrics at the
|
|
||||||
## cost of higher maximum memory usage.
|
|
||||||
metric_buffer_limit = 10000
|
|
||||||
|
|
||||||
## Collection jitter is used to jitter the collection by a random amount.
|
|
||||||
## Each plugin will sleep for a random time within jitter before collecting.
|
|
||||||
## This can be used to avoid many plugins querying things like sysfs at the
|
|
||||||
## same time, which can have a measurable effect on the system.
|
|
||||||
collection_jitter = "0s"
|
|
||||||
|
|
||||||
## Default flushing interval for all outputs. Maximum flush_interval will be
|
|
||||||
## flush_interval + flush_jitter
|
|
||||||
flush_interval = "10s"
|
|
||||||
## Jitter the flush interval by a random amount. This is primarily to avoid
|
|
||||||
## large write spikes for users running a large number of telegraf instances.
|
|
||||||
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
|
|
||||||
flush_jitter = "0s"
|
|
||||||
|
|
||||||
## By default or when set to "0s", precision will be set to the same
|
|
||||||
## timestamp order as the collection interval, with the maximum being 1s.
|
|
||||||
## ie, when interval = "10s", precision will be "1s"
|
|
||||||
## when interval = "250ms", precision will be "1ms"
|
|
||||||
## Precision will NOT be used for service inputs. It is up to each individual
|
|
||||||
## service input to set the timestamp at the appropriate precision.
|
|
||||||
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
|
|
||||||
precision = ""
|
|
||||||
|
|
||||||
## Override default hostname, if empty use os.Hostname()
|
|
||||||
hostname = "${HOST_NAME}"
|
|
||||||
## If set to true, do no set the "host" tag in the telegraf agent.
|
|
||||||
omit_hostname = false
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
# OUTPUT PLUGINS #
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
# Configuration for sending metrics to InfluxDB
|
|
||||||
[[outputs.influxdb]]
|
|
||||||
## The full HTTP or UDP URL for your InfluxDB instance.
|
|
||||||
##
|
|
||||||
## Multiple URLs can be specified for a single cluster, only ONE of the
|
|
||||||
## urls will be written to each interval.
|
|
||||||
# urls = ["unix:///var/run/influxdb.sock"]
|
|
||||||
# urls = ["udp://127.0.0.1:8089"]
|
|
||||||
# urls = ["http://127.0.0.1:8086"]
|
|
||||||
urls = ["http://influxdb.lan"] # required
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
# INPUT PLUGINS #
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
# Read metrics about docker containers
|
|
||||||
[[inputs.docker]]
|
|
||||||
## Docker Endpoint
|
|
||||||
## To use TCP, set endpoint = "tcp://[ip]:[port]"
|
|
||||||
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
|
|
||||||
#endpoint = "unix:///var/run/docker.sock"
|
|
||||||
endpoint = "tcp://docker-socket-proxy:2375"
|
|
||||||
|
|
||||||
## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
|
|
||||||
gather_services = false
|
|
||||||
|
|
||||||
## Only collect metrics for these containers, collect all if empty
|
|
||||||
container_names = []
|
|
||||||
|
|
||||||
## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
|
|
||||||
source_tag = false
|
|
||||||
|
|
||||||
## Containers to include and exclude. Globs accepted.
|
|
||||||
## Note that an empty array for both will include all containers
|
|
||||||
container_name_include = []
|
|
||||||
container_name_exclude = []
|
|
||||||
|
|
||||||
## Container states to include and exclude. Globs accepted.
|
|
||||||
## When empty only containers in the "running" state will be captured.
|
|
||||||
## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
|
|
||||||
## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
|
|
||||||
# container_state_include = []
|
|
||||||
# container_state_exclude = []
|
|
||||||
|
|
||||||
## Timeout for docker list, info, and stats commands
|
|
||||||
timeout = "5s"
|
|
||||||
|
|
||||||
## Whether to report for each container per-device blkio (8:0, 8:1...) and
|
|
||||||
## network (eth0, eth1, ...) stats or not
|
|
||||||
perdevice = true
|
|
||||||
|
|
||||||
## Whether to report for each container total blkio and network stats or not
|
|
||||||
total = false
|
|
||||||
|
|
||||||
## Which environment variables should we use as a tag
|
|
||||||
##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
|
|
||||||
|
|
||||||
## docker labels to include and exclude as tags. Globs accepted.
|
|
||||||
## Note that an empty array for both will include all labels as tags
|
|
||||||
docker_label_include = []
|
|
||||||
docker_label_exclude = []
|
|
||||||
|
|
||||||
## Optional TLS Config
|
|
||||||
# tls_ca = "/etc/telegraf/ca.pem"
|
|
||||||
# tls_cert = "/etc/telegraf/cert.pem"
|
|
||||||
# tls_key = "/etc/telegraf/key.pem"
|
|
||||||
## Use TLS but skip chain & host verification
|
|
||||||
# insecure_skip_verify = false
|
|
||||||
|
|
||||||
|
|
||||||
# # Monitor disks' temperatures using hddtemp
|
|
||||||
# [[inputs.hddtemp]]
|
|
||||||
# ## By default, telegraf gathers temps data from all disks detected by the
|
|
||||||
# ## hddtemp.
|
|
||||||
# ##
|
|
||||||
# ## Only collect temps from the selected disks.
|
|
||||||
# ##
|
|
||||||
# ## A * as the device name will return the temperature values of all disks.
|
|
||||||
# ##
|
|
||||||
# # address = "127.0.0.1:7634"
|
|
||||||
# # devices = ["sda", "*"]
|
|
|
@@ -1,19 +0,0 @@
|
||||||
# call this script like "./test-telegraf.sh net:cpu"
|
|
||||||
|
|
||||||
docker run \
|
|
||||||
--rm \
|
|
||||||
--volume $(pwd)/telegraf/telegraf.conf:/telegraf.conf \
|
|
||||||
--volume /:/hostfs:ro \
|
|
||||||
--volume /var/run/utmp:/var/run/utmp:ro \
|
|
||||||
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
|
|
||||||
-e HOST_ETC=/hostfs/etc \
|
|
||||||
-e HOST_PROC=/hostfs/proc \
|
|
||||||
-e HOST_SYS=/hostfs/sys \
|
|
||||||
-e HOST_VAR=/hostfs/var \
|
|
||||||
-e HOST_RUN=/hostfs/run \
|
|
||||||
-e HOST_MOUNT_PREFIX=/hostfs \
|
|
||||||
telegraf \
|
|
||||||
telegraf \
|
|
||||||
--config /telegraf.conf \
|
|
||||||
--input-filter $1 \
|
|
||||||
--test
|
|
|
@@ -1,28 +0,0 @@
|
||||||
services:
|
|
||||||
|
|
||||||
stirling-pdf:
|
|
||||||
image: frooodle/s-pdf:latest
|
|
||||||
# ports:
|
|
||||||
# - '8080:8080'
|
|
||||||
networks:
|
|
||||||
- web
|
|
||||||
volumes:
|
|
||||||
- ${VOLUMES_PATH}/tools/stirling-pdf/trainingData:/usr/share/tesseract-ocr/5/tessdata #Required for extra OCR languages
|
|
||||||
- ${VOLUMES_PATH}/tools/stirling-pdf/extraConfigs:/configs
|
|
||||||
# - ${VOLUMES_PATH}/tools/stirling-pdf/customFiles:/customFiles/
|
|
||||||
# - ${VOLUMES_PATH}/tools/stirling-pdf/logs:/logs/
|
|
||||||
environment:
|
|
||||||
- DOCKER_ENABLE_SECURITY=false
|
|
||||||
labels:
|
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.pdf.rule=Host(`pdf.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.pdf.entrypoints=web"
|
|
||||||
- "traefik.http.services.pdf.loadbalancer.server.port=8080"
|
|
||||||
- "traefik.docker.network=web"
|
|
||||||
- "docker.group=tools"
|
|
||||||
|
|
||||||
networks:
|
|
||||||
paperless:
|
|
||||||
web:
|
|
||||||
external: true
|
|
||||||
|
|
|
@@ -18,10 +18,6 @@ services:
|
||||||
- 51413:51413/udp
|
- 51413:51413/udp
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
labels:
|
labels:
|
||||||
- "traefik.enable=true"
|
|
||||||
- "traefik.http.routers.torrent.rule=Host(`torrent.${DOMAIN}`)"
|
|
||||||
- "traefik.http.routers.torrent.entrypoints=web"
|
|
||||||
- "traefik.http.services.torrent.loadbalancer.server.port=9091"
|
|
||||||
- "docker.group=torrent"
|
- "docker.group=torrent"
|
||||||
mem_limit: 512m
|
mem_limit: 512m
|
||||||
|
|
||||||
|
|