remove monitoring stack

Florian Zirker 2024-10-15 11:35:52 +00:00
parent c298155ceb
commit 113cb283b7
15 changed files with 4 additions and 646 deletions

@@ -7,7 +7,6 @@ Hosting the following web services using docker-compose on a public root server:
* [Wallabag](https://www.wallabag.it)
* Small HTML-Site
* Firefox-Sync
* Monitoring-Stack with [Telegraf](https://github.com/influxdata/telegraf), [Influxdb](https://github.com/influxdata/influxdb) and [Grafana](https://github.com/grafana/grafana)
As a reverse proxy, [Traefik](https://traefik.io/traefik/) is used. Traefik also secures all services with TLS and redirects all HTTP requests to HTTPS. SSL certificates are automatically generated using [Let's Encrypt](https://letsencrypt.org/).
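
The Traefik static configuration is not part of this commit; a minimal sketch of how the HTTP-to-HTTPS redirect and the netcup certificate resolver referenced by the traefik.* labels further down might be declared (assuming a Traefik v2 YAML static configuration; the e-mail address and storage path are placeholders):

entryPoints:
  web:
    address: ":80"
    http:
      redirections:
        entryPoint:
          to: websecure
          scheme: https
  websecure:
    address: ":443"

certificatesResolvers:
  netcup:
    acme:
      email: "admin@example.org"        # placeholder
      storage: "/letsencrypt/acme.json" # placeholder
      dnsChallenge:
        provider: netcup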

@@ -16,7 +16,6 @@ services:
- "traefik.http.routers.ffs.tls.options=intermediate@file"
- "traefik.http.services.ffs.loadbalancer.server.port=5000"
- "docker.group=firefoxsync"
- "diun.enable=true"
restart: unless-stopped
environment:
- SYNCSERVER_PUBLIC_URL=https://firefoxsync.${DOMAIN}
@@ -46,7 +45,6 @@ services:
timeout: 5s
labels:
- "docker.group=firefoxsync"
- "diun.enable=true"
networks:

@@ -39,7 +39,6 @@ services:
- "traefik.http.middlewares.gitearedir.redirectregex.permanent=true"
- "traefik.http.services.forgejo.loadbalancer.server.port=3000"
- "docker.group=git"
- "diun.enable=true"
db:
@@ -59,7 +58,6 @@ services:
timeout: 5s
labels:
- "docker.group=git"
- "diun.enable=true"
networks:

@@ -1,185 +0,0 @@
services:
grafana:
image: grafana/grafana:${GRAFANA_VERSION}
restart: unless-stopped
networks:
- web
- grafana
- monitoring
labels:
- "traefik.enable=true"
- "traefik.http.routers.grafana.rule=Host(`monitoring.${DOMAIN}`)"
- "traefik.http.routers.grafana.entrypoints=websecure"
- "traefik.http.routers.grafana.tls.certresolver=netcup"
- "traefik.http.routers.grafana.tls.options=intermediate@file"
- "traefik.http.services.grafana.loadbalancer.server.port=3000"
- "traefik.docker.network=web"
- "docker.group=monitoring"
- "diun.enable=true"
environment:
- GF_DEFAULT_INSTANCE_NAME=monitoring.${DOMAIN}
- GF_SERVER_ROOT_URL=http://monitoring.${DOMAIN}
- GF_SERVER_DOMAIN=monitoring.${DOMAIN}
- GF_SERVER_SERVE_FROM_SUB_PATH=true
- GF_SECURITY_DISABLE_GRAVATAR=true
- GF_AUTH_ANONYMOUS_ENABLED=false
- GF_AUTH_ANONYMOUS_ORG_ROLE=Viewer
- GF_DATABASE_TYPE=postgres
- GF_DATABASE_HOST=grafanadb:5432
- GF_DATABASE_SSL_MODE=disable
- GF_DATABASE_NAME=${POSTGRES_DB}
- GF_DATABASE_USER=${POSTGRES_USER}
- GF_DATABASE_PASSWORD=${POSTGRES_PASSWORD}
- GF_INSTALL_PLUGINS=flant-statusmap-panel,redis-datasource
depends_on:
- influxdb
- grafanadb
grafanadb:
image: postgres:${POSTGRES_VERSION}
volumes:
- ${VOLUMES_PATH}/monitoring/grafanadb:/var/lib/postgresql/data
networks:
- grafana
restart: unless-stopped
environment:
- POSTGRES_USER=${POSTGRES_USER}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_DB=${POSTGRES_DB}
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
interval: 10s
timeout: 5s
labels:
- "docker.group=monitoring"
- "diun.enable=true"
influxdb:
image: influxdb:${INFLUXDB_VERSION}
restart: unless-stopped
networks:
- web
- monitoring
- grafana
environment:
- INFLUXDB_MONITOR_STORE_ENABLED=false
volumes:
- ${VOLUMES_PATH}/monitoring/influxdb/:/var/lib/influxdb
- ${PWD}/influxdb.conf:/etc/influxdb/influxdb.conf:ro
labels:
- "traefik.enable=true"
- "traefik.http.routers.influxdb.rule=Host(`influxdb.${DOMAIN}`)"
- "traefik.http.routers.influxdb.entrypoints=websecure"
- "traefik.http.routers.influxdb.tls.certresolver=netcup"
- "traefik.http.routers.influxdb.tls.options=intermediate@file"
- "traefik.http.services.influxdb.loadbalancer.server.port=8086"
- "traefik.http.routers.influxdb.middlewares=influxauth"
- "traefik.http.middlewares.influxauth.basicauth.users=${INFLUXDB_HTPASSWD}"
- "docker.group=monitoring"
- "diun.enable=true"
prometheus:
image: prom/prometheus
restart: unless-stopped
networks:
- grafana
- monitoring
- web # also used to get traefik metrics
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- ${VOLUMES_PATH}/monitoring/prometheus:/prometheus
labels:
- "docker.group=monitoring"
- "diun.enable=true"
# https://github.com/xperimental/nextcloud-exporter
nextcloud-exporter:
image: ghcr.io/xperimental/nextcloud-exporter
networks:
- monitoring
environment:
- NEXTCLOUD_SERVER=${NEXTCLOUD_URL}
- NEXTCLOUD_AUTH_TOKEN=${NEXTCLOUD_MONITORING_AUTH_TOKEN}
- NEXTCLOUD_LISTEN_ADDRESS=:9205
labels:
- "docker.group=monitoring"
- "diun.enable=true"
diun:
image: crazymax/diun:latest
command: serve
volumes:
- "${VOLUMES_PATH}/monitoring/diun/data:/data"
networks:
- dockersocket
hostname: ${HOSTNAME}
environment:
- "TZ=Europe/Berlin"
- "DIUN_WATCH_WORKERS=10"
- "DIUN_WATCH_SCHEDULE=0 */6 * * *"
- "DIUN_WATCH_JITTER=30s"
- "DIUN_PROVIDERS_DOCKER_ENDPOINT=tcp://docker-socket-proxy:2375"
- "DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT=false"
- "DIUN_NOTIF_NTFY_ENDPOINT=${NTFY_SERVER}"
- "DIUN_NOTIF_NTFY_TOPIC=${NTFY_TOPIC}"
- "DIUN_NOTIF_NTFY_TOKEN=${NTFY_TOKEN}"
restart: always
labels:
- "diun.enable=true"
##################################################################
# here starts data collection for the local host
telegraf_host:
image: telegraf:${TELEGRAF_VERSION}
restart: unless-stopped
environment:
- HOST_MOUNT_PREFIX=/hostfs
- HOST_PROC=/hostfs/proc
- HOST_SYS=/hostfs/sys
- HOST_ETC=/hostfs/etc
- HOST_VAR=/hostfs/var
- HOST_RUN=/hostfs/run
env_file:
- ./.env # set environments into container
volumes:
- ./telegraf_host.conf:/etc/telegraf/telegraf.conf:ro
- /var/run/utmp:/var/run/utmp:ro
- /:/hostfs:ro
network_mode: "host"
labels:
- "docker.group=monitoring"
- "diun.enable=true"
depends_on:
- influxdb
telegraf_net:
image: telegraf:${TELEGRAF_VERSION}
restart: unless-stopped
volumes:
- ./telegraf_net.conf:/etc/telegraf/telegraf.conf:ro
networks:
- monitoring
- dockersocket
labels:
- "docker.group=monitoring"
- "diun.enable=true"
depends_on:
- influxdb
networks:
grafana:
monitoring:
external: true
web:
external: true
dockersocket:
external: true
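
With DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT=false above, diun only watched containers that opted in via label, which is why the remaining files in this commit drop their diun.enable labels. The opt-in looked roughly like this (minimal sketch; service name and image are placeholders):

services:
  example:                  # placeholder service
    image: nginx:stable     # placeholder image
    labels:
      - "diun.enable=true"  # watched by diun for image updates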

@@ -1,12 +0,0 @@
[meta]
dir = "/var/lib/influxdb/meta"
[data]
dir = "/var/lib/influxdb/data"
wal-dir = "/var/lib/influxdb/wal"
max-concurrent-compactions = 1
[monitor]
store-enabled = false
store-database = "_internal"
store-interval = "10s"

@@ -1,42 +0,0 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: "prometheus"
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ["localhost:9090"]
- job_name: "traefik"
scrape_interval: 5s
static_configs:
- targets: ["traefik:8080"]
- job_name: "grafana"
static_configs:
- targets: ["grafana:3000"]
- job_name: 'nextcloud'
scrape_interval: 60s
static_configs:
- targets: ['nextcloud-exporter:9205']
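
The traefik scrape job above targets traefik:8080, which assumes Traefik's Prometheus metrics endpoint is enabled on its internal entrypoint. A minimal sketch of that static configuration (an assumption, not shown in this commit):

metrics:
  prometheus:
    entryPoint: traefik        # internal entrypoint serving /metrics on port 8080
    addEntryPointsLabels: true
    addServicesLabels: true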

@@ -1,204 +0,0 @@
# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
# Config Sample under https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf
# Global tags can be specified here in key="value" format.
[global_tags]
# datacenter
dc="florianzirker.de"
source="telegraf_host"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Override default hostname, if empty use os.Hostname()
hostname = "wong"
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
# urls = ["http://127.0.0.1:8086"]
## HTTP Basic Auth
username = "${INFLUXDB_HTTP_BASIC_AUTH_USER}"
password = "${INFLUXDB_HTTP_BASIC_AUTH_PASSWORD}"
urls = ["https://influxdb.florianzirker.de"] # required
###############################################################################
# INPUT PLUGINS #
###############################################################################
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default stats will be gathered for all mount points.
## Set mount_points will restrict the stats to only the specified mount points.
mount_points = ["/hostfs", "/hostfs/boot"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
# Read metrics about disk IO by device
[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb", "vd*"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
#
## On systems which support it, device metadata can be added in the form of
## tags.
## Currently only Linux is supported via udev properties. You can view
## available properties for a device by running:
## 'udevadm info -q property -n /dev/sda'
## Note: Most, but not all, udev properties can be accessed this way. Properties
## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
#
## Using the same metadata source as device_tags, you can also customize the
## name of the device via templates.
## The 'name_templates' parameter is a list of templates to try and apply to
## the device. The template may contain variables in the form of '$PROPERTY' or
## '${PROPERTY}'. The first template which does not contain any variables not
## present for the device is used as the device name tag.
## The typical use case is for LVM volumes, to get the VG/LV name instead of
## the near-meaningless DM-0 name.
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[inputs.system]]
## Uncomment to remove deprecated metrics.
# fielddrop = ["uptime_format"]
# Gather metrics about network interfaces
[[inputs.net]]
## By default, telegraf gathers stats from any up interface (excluding loopback)
## Setting interfaces will tell it to gather these explicit interfaces,
## regardless of status. When specifying an interface, glob-style
## patterns are also supported.
##
interfaces = ["eth*"]
##
## On linux systems telegraf also collects protocol stats.
## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
##
# ignore_protocol_stats = false
##
# # Read TCP metrics such as established, time wait and sockets counts.
[[inputs.netstat]]
# no configuration
# Collect kernel snmp counters and network interface statistics
[[inputs.nstat]]
## file paths for proc files. If empty default paths will be used:
## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
## These can also be overridden with env variables, see README.
proc_net_netstat = "/proc/net/netstat"
proc_net_snmp = "/proc/net/snmp"
proc_net_snmp6 = "/proc/net/snmp6"
## dump metrics with 0 values too
dump_zeros = true

@@ -1,175 +0,0 @@
# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
# Config Sample under https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf
# Global tags can be specified here in key="value" format.
[global_tags]
# datacenter
dc="florianzirker.de"
source="telegraf_web"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Override default hostname, if empty use os.Hostname()
hostname = "wong"
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
## Multiple URLs can be specified for a single cluster, only ONE of the
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
# urls = ["http://127.0.0.1:8086"]
urls = ["http://influxdb:8086"]
###############################################################################
# INPUT PLUGINS #
###############################################################################
[[inputs.http]]
name_override = "jitsi_stats"
urls = [
"http://jvb:8080/colibri/stats"
]
data_format = "json"
# Read metrics about docker containers
[[inputs.docker]]
## Docker Endpoint
## To use TCP, set endpoint = "tcp://[ip]:[port]"
## To use environment variables (ie, docker-machine), set endpoint = "ENV"
endpoint = "tcp://docker-socket-proxy:2375"
# endpoint = "unix:///var/run/docker.sock"
## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
## Note: configure this in one of the manager nodes in a Swarm cluster.
## configuring in multiple Swarm managers results in duplication of metrics.
gather_services = false
## Only collect metrics for these containers. Values will be appended to
## container_name_include.
## Deprecated (1.4.0), use container_name_include
container_names = []
## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
source_tag = false
## Containers to include and exclude. Collect all if empty. Globs accepted.
container_name_include = []
container_name_exclude = []
## Container states to include and exclude. Globs accepted.
## When empty only containers in the "running" state will be captured.
## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
# container_state_include = []
# container_state_exclude = []
## Timeout for docker list, info, and stats commands
timeout = "5s"
## Whether to report for each container per-device blkio (8:0, 8:1...),
## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
## is honored.
perdevice = false
## Specifies for which classes a per-device metric should be issued
## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
## Please note that this setting has no effect if 'perdevice' is set to 'true'
perdevice_include = ["cpu", "blkio", "network"]
## Whether to report for each container total blkio and network stats or not.
## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
## is honored.
total = true
## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values.
## Possible values are 'cpu', 'blkio' and 'network'
## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
## Please note that this setting has no effect if 'total' is set to 'false'
total_include = ["cpu", "blkio", "network"]
## docker labels to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all labels as tags
docker_label_include = []
docker_label_exclude = []
## Which environment variables should we use as a tag
tag_env = ["JAVA_HOME", "HEAP_SIZE"]
## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = false
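
The docker input above reaches the Docker API through tcp://docker-socket-proxy:2375 rather than mounting the socket into the Telegraf container. A minimal sketch of such a proxy service, assuming the tecnativa/docker-socket-proxy image that appears further down in this commit (socket path and network attachment as commonly configured):

services:
  docker-socket-proxy:
    image: tecnativa/docker-socket-proxy
    environment:
      - CONTAINERS=1                                   # read-only access to container endpoints
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro   # only the proxy touches the socket directly
    networks:
      - dockersocket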

@@ -29,7 +29,6 @@ services:
- "traefik.http.middlewares.nextcloudHeader.headers.customResponseHeaders.X-Robots-Tag=noindex,nofollow"
- "traefik.http.routers.nextcloud.middlewares=nextcloudHeader"
- "docker.group=netxtcloud"
- "diun.enable=true"
app:
@@ -106,7 +105,6 @@ services:
- "traefik.http.routers.ncpush.middlewares=ncpushstrip"
- "traefik.http.middlewares.ncpushstrip.stripprefix.prefixes=/push"
- "docker.group=proxy"
- "diun.enable=true"
db:
@@ -138,7 +136,6 @@ services:
timeout: 3s
labels:
- "docker.group=netxtcloud"
- "diun.enable=true"
# adminer:
@@ -161,7 +158,6 @@ services:
# - "traefik.http.routers.adminer.middlewares=adminerauth"
# - "traefik.http.middlewares.adminerauth.basicauth.users=${HTPASSWD_ADMINER}"
# - "docker.group=netxtcloud"
# - "diun.enable=true"
redis:
@@ -170,7 +166,6 @@ services:
command: redis-server --requirepass ${REDIS_HOST_PASSWORD}
networks:
- nextcloud
- monitoring
volumes:
- ${VOLUMES_PATH}/nextcloud/redis:/data
healthcheck:
@@ -179,7 +174,6 @@ services:
timeout: 3s
labels:
- "docker.group=netxtcloud"
- "diun.enable=true"
collabora:
@@ -208,7 +202,6 @@ services:
- "traefik.http.routers.collabora.tls.options=intermediate@file"
- "traefik.http.services.collabora.loadbalancer.server.port=9980"
- "docker.group=netxtcloud"
- "diun.enable=true"
drawio-export:
@@ -222,7 +215,6 @@ services:
restart: unless-stopped
labels:
- "docker.group=netxtcloud"
- "diun.enable=false" # not enabled becaus of spamming :)
drawio:
@@ -252,7 +244,6 @@ services:
- "traefik.http.routers.drawio.tls.options=intermediate@file"
- "traefik.http.services.drawio.loadbalancer.server.port=8080"
- "docker.group=netxtcloud"
- "diun.enable=false" # not enabled becaus of spamming :)
networks:
web:
@@ -262,5 +253,3 @@ networks:
config:
- subnet: 172.153.0.0/16 # necessary for the notify_push <-> nextcloud traffic
mariadb:
monitoring:
external: true

@@ -59,7 +59,7 @@ services:
- "traefik.http.routers.dashboard.middlewares=auth"
- "traefik.http.middlewares.auth.basicauth.users=${HTPASSWD}"
- "docker.group=proxy"
- "diun.enable=true"
docker-socket-proxy:
image: tecnativa/docker-socket-proxy
@@ -71,7 +71,6 @@ services:
- LOG_LEVEL=warning
- CONTAINERS=1
- INFO=1
- IMAGES=1 # for diun
networks:
- dockersocket
healthcheck:
@@ -81,7 +80,6 @@ services:
privileged: true
labels:
- "docker.group=proxy"
- "diun.enable=true"
# whoami:

@@ -32,10 +32,9 @@ services:
- "traefik.http.routers.push.tls.options=intermediate@file"
- "traefik.http.services.push.loadbalancer.server.port=80"
- "docker.group=push"
- "diun.enable=true"
networks:
push:
web:
external: true

@@ -17,7 +17,7 @@ services:
restart: unless-stopped
labels:
- "docker.group=rustdesk"
- "diun.enable=true"
hbbr:
ports:
@@ -32,7 +32,7 @@ services:
restart: unless-stopped
labels:
- "docker.group=rustdesk"
- "diun.enable=true"
networks:
rustdesk:

@@ -6,7 +6,6 @@ function up {
#up proxy --scale whoami=3;
up proxy;
up monitoring;
up nextcloud;
up git;
up wallabag;

@@ -32,7 +32,6 @@ services:
- "traefik.http.routers.wallabag.tls.certresolver=netcup"
- "traefik.http.routers.wallabag.tls.options=intermediate@file"
- "docker.group=wallabag"
- "diun.enable=true"
depends_on:
- db
- redis
@@ -48,7 +47,6 @@ services:
- /var/dockervolumes/wallabag/db:/var/lib/mysql
labels:
- "docker.group=wallabag"
- "diun.enable=true"
redis:
image: redis:7.2.4
@@ -61,7 +59,6 @@ services:
timeout: 3s
labels:
- "docker.group=wallabag"
- "diun.enable=true"
networks:

@@ -27,7 +27,6 @@ services:
- "traefik.http.routers.www-secure.tls.certresolver=netcup"
- "traefik.http.routers.www-secure.tls.options=intermediate@file"
- "docker.group=www"
- "diun.enable=true"
networks:
web: