26 Commits

Author SHA1 Message Date
22efffdbe7 fix alloy configuration 2026-03-20 14:51:42 -04:00
676e159e34 Adding AI-generated compose/config files for alloy/loki/grafana stack 2026-03-19 21:51:38 -04:00
203c8a15ba Adding compose.yaml and related files for Beszel 2026-03-19 21:46:54 -04:00
b99fc435a8 create compose.yaml 2026-03-16 22:57:29 -04:00
e2f05f6e9c adding prometheus stuff 2026-03-16 22:47:28 -04:00
4bf457c84a Merge branch 'dev' 2025-12-01 23:49:03 -05:00
de296b9c76 Upgrade Immich to v2.3.1 2025-12-01 23:46:25 -05:00
81a2ef1cec merging .env to master from dev 2025-11-28 10:09:43 -05:00
2b2b3e43c9 merging up to master from dev 2025-11-28 10:07:28 -05:00
462a2ab8f1 Upgrade nextcloud to v32 2025-11-28 09:41:27 -05:00
f4c423b8b2 Upgrade immich to v2.2.3 2025-11-22 11:33:58 -05:00
5c8984cb56 upgrade to Immich v2.1.0 2025-11-17 00:00:53 -05:00
8de8c99c8a Upgrade immich to v2.0.0 - Stable Release 2025-10-04 00:00:17 -04:00
1c02ba1c93 Upgrade immich to v1.144.1 2025-10-03 23:52:00 -04:00
3b4991239f Upgrade immich to v1.143.0 2025-10-03 23:49:06 -04:00
d1532b0011 Upgrade immich to v1.142.1 2025-10-03 23:39:01 -04:00
b62e6bc65a Upgrade immich to v1.142.0 2025-10-03 17:28:16 -04:00
2bcf36d6d0 Upgrade immich to v1.141.1 2025-10-03 17:18:29 -04:00
f8c4fd560e Upgrade immich to v1.140.0 2025-10-03 17:14:42 -04:00
7d8a3708fb Upgrade immich to v1.139.2 2025-10-03 17:08:36 -04:00
1e82798a3b Upgrade immich to v1.138.0 2025-10-03 17:03:00 -04:00
92653301a6 Upgrade immich to v137.0 2025-10-03 16:53:05 -04:00
f2f50600fc Fix bind mount for /media directory 2025-10-03 16:37:46 -04:00
391bb529fc Add compose file for MakeMKV 2025-09-22 22:46:38 -04:00
67d956ed97 Merge pull request 'upgrade Immich to v136.0' (#2) from immich_upgrade into master
Reviewed-on: #2
2025-07-24 16:57:01 -04:00
bc692bff50 Merge pull request 'immich_upgrade' (#1) from immich_upgrade into master
Reviewed-on: #1
2025-07-24 16:37:25 -04:00
19 changed files with 817 additions and 12 deletions

27
beszel/compose.full Normal file
View File

@@ -0,0 +1,27 @@
---
# Beszel full stack: hub (web UI on :8090) plus a local agent that reports
# host/container stats to the hub over a shared unix socket.
services:
  beszel:
    image: henrygd/beszel:latest
    container_name: beszel
    restart: unless-stopped
    environment:
      APP_URL: http://localhost:8090
    ports:
      - "8090:8090"  # quoted: digit-and-colon scalars are a YAML typing trap
    volumes:
      - ./beszel_data:/beszel_data
      - ./beszel_socket:/beszel_socket

  beszel-agent:
    image: henrygd/beszel-agent:latest
    container_name: beszel-agent
    restart: unless-stopped
    network_mode: host
    volumes:
      - ./beszel_agent_data:/var/lib/beszel-agent
      - ./beszel_socket:/beszel_socket
      # Read-only socket: the agent only needs to read container metadata/stats.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      LISTEN: /beszel_socket/beszel.sock
      HUB_URL: http://localhost:8090
      # NOTE(review): TOKEN and KEY are credentials committed in plain text.
      # Prefer loading them from an .env file kept out of version control.
      TOKEN: da52524a-fa93-47ef-908c-dd001c9e8e18
      KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFgDayLLA8JOzQ7fZtqTlUaRIQvhtoYSZvkUC9PrdQJG"

13
beszel/compose.hubonly Normal file
View File

@@ -0,0 +1,13 @@
---
# Beszel hub only (no local agent): web UI on :8090.
services:
  beszel:
    image: henrygd/beszel:latest
    container_name: beszel
    restart: unless-stopped
    environment:
      APP_URL: http://localhost:8090
    ports:
      - "8090:8090"  # quoted: digit-and-colon scalars are a YAML typing trap
    volumes:
      - ./beszel_data:/beszel_data
      - ./beszel_socket:/beszel_socket

27
beszel/compose.yaml Normal file
View File

@@ -0,0 +1,27 @@
---
# Beszel hub + agent (same content as compose.full): hub web UI on :8090,
# agent reports host/container stats over the shared unix socket.
services:
  beszel:
    image: henrygd/beszel:latest
    container_name: beszel
    restart: unless-stopped
    environment:
      APP_URL: http://localhost:8090
    ports:
      - "8090:8090"  # quoted: digit-and-colon scalars are a YAML typing trap
    volumes:
      - ./beszel_data:/beszel_data
      - ./beszel_socket:/beszel_socket

  beszel-agent:
    image: henrygd/beszel-agent:latest
    container_name: beszel-agent
    restart: unless-stopped
    network_mode: host
    volumes:
      - ./beszel_agent_data:/var/lib/beszel-agent
      - ./beszel_socket:/beszel_socket
      # Read-only socket: the agent only needs to read container metadata/stats.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      LISTEN: /beszel_socket/beszel.sock
      HUB_URL: http://localhost:8090
      # NOTE(review): TOKEN and KEY are credentials committed in plain text.
      # Prefer loading them from an .env file kept out of version control.
      TOKEN: da52524a-fa93-47ef-908c-dd001c9e8e18
      KEY: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFgDayLLA8JOzQ7fZtqTlUaRIQvhtoYSZvkUC9PrdQJG"

View File

@@ -9,7 +9,7 @@ DB_DATA_LOCATION=/mnt/storage/appdata/immich/postgres
TZ=America/New_York TZ=America/New_York
# The Immich version to use. You can pin this to a specific version like "v1.71.0" # The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=v1.136.0 IMMICH_VERSION=v2.3.1
# Connection secret for postgres. You should change it to a random password # Connection secret for postgres. You should change it to a random password
DB_PASSWORD=postgres DB_PASSWORD=postgres

View File

@@ -1,10 +1,11 @@
# #
# WARNING: Make sure to use the docker-compose.yml of the current release: # WARNING: To install Immich, follow our guide: https://immich.app/docs/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
# #
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml # https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
# #
# The compose file on main may not be compatible with the latest release. # The compose file on main may not be compatible with the latest release.
#
name: immich name: immich
@@ -16,50 +17,57 @@ services:
# file: hwaccel.transcoding.yml # file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding # service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes: volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload # Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/data
- /etc/localtime:/etc/localtime:ro - /etc/localtime:/etc/localtime:ro
env_file: env_file:
- .env - .env
ports: ports:
- 2283:2283 - '2283:2283'
depends_on: depends_on:
- redis - redis
- database - database
restart: always restart: always
healthcheck:
disable: false
immich-machine-learning: immich-machine-learning:
container_name: immich_machine_learning container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag. # For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda # Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release} image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration # extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml # file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable # service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes: volumes:
- model-cache:/cache - model-cache:/cache
env_file: env_file:
- .env - .env
restart: always restart: always
healthcheck:
disable: false
redis: redis:
container_name: immich_redis container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:d6c2911ac51b289db208767581a5d154544f2b2fe4914ea5056443f62dc6e900 image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
healthcheck: healthcheck:
test: redis-cli ping || exit 1 test: redis-cli ping || exit 1
restart: always restart: always
database: database:
container_name: immich_postgres container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.3.0-pgvectors0.2.0 image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:8d292bdb796aa58bbbaa47fe971c8516f6f57d6a47e7172e62754feb6ed4e7b0
environment: environment:
POSTGRES_PASSWORD: ${DB_PASSWORD} POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME} POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME} POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums' POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs # Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
DB_STORAGE_TYPE: 'HDD' # DB_STORAGE_TYPE: 'HDD'
volumes: volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data - ${DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
restart: always restart: always
volumes: volumes:

View File

@@ -2,13 +2,16 @@ services:
jellyfin: jellyfin:
image: jellyfin/jellyfin image: jellyfin/jellyfin
container_name: jellyfin container_name: jellyfin
user: 1000:1000
network_mode: 'host' network_mode: 'host'
volumes: volumes:
- /mnt/storage/appdata/jellyfin/config:/config - /mnt/storage/appdata/jellyfin/config:/config
- /mnt/storage/appdata/jellyfin/config/web-config.json:/jellyfin/jellyfin-web/config.json - /mnt/storage/appdata/jellyfin/config/web-config.json:/jellyfin/jellyfin-web/config.json
- /mnt/storage/appdata/jellyfin/cache:/cache - /mnt/storage/appdata/jellyfin/cache:/cache
- /mnt/cygnus:/cygnus:ro - /mnt/cygnus:/cygnus:ro
- type: bind
source: /mnt/storage/appdata/jellyfin/media
target: /media
read_only: true
restart: 'unless-stopped' restart: 'unless-stopped'
# Optional - alternative address used for autodiscovery # Optional - alternative address used for autodiscovery
# environment: # environment:

121
loki/README.md Normal file
View File

@@ -0,0 +1,121 @@
# Homelab Log Aggregation Stack
Grafana Alloy + Loki + Grafana, configured for:
- **MikroTik RB5009** (and other network devices) via syslog
- **Docker container logs** on the host machine
## Quick Start
```bash
# (Optional) load secrets first if using the Vaultwarden secrets workflow
# ./secrets-load.sh docker/loki-stack .env
docker compose up -d
```
Grafana will be available at **http://\<host-ip\>:3098**
Default login: `admin` / `admin` — you will be prompted to change this.
---
## MikroTik RB5009 Configuration
In RouterOS (Winbox or SSH), run:
```routeros
# Create a remote logging action pointing at this Docker host
/system logging action
add name=remote-loki \
target=remote \
remote=<YOUR-DOCKER-HOST-IP> \
remote-port=514 \
bsd-syslog=yes \
syslog-facility=local0 \
syslog-severity=auto
# Send all log topics to Loki
/system logging
add action=remote-loki topics=all
```
To verify it's working, SSH into the RB5009 and run:
```routeros
/log print follow
```
...then in Grafana, open Explore → Loki and query `{source="network"}`.
You should see entries appearing within a few seconds.
---
## Useful LogQL Queries
**All RB5009 logs:**
```logql
{job="syslog", source="network"}
```
**RB5009 interface/link events only:**
```logql
{job="syslog", source="network"} |= "link"
```
**All logs from a specific Docker container:**
```logql
{job="docker", container="myapp"}
```
**Errors across all Docker containers:**
```logql
{job="docker"} |= "error" | logfmt | level="error"
```
**Everything in the last 24 hours, newest first:**
```logql
{job=~"syslog|docker"} | line_format "{{.source}} {{.container}} {{.message}}"
```
---
## File Layout
```
loki/
├── compose.yaml
└── config/
    ├── alloy.alloy               # Alloy pipeline config (syslog + Docker)
    ├── loki.yml                  # Loki storage and retention config
    └── grafana-datasources.yml   # Auto-provisions Loki as default datasource
```
## Retention
Logs are kept for **90 days** by default. To change this, edit `config/loki.yml` (the file mounted into the Loki container):
```yaml
limits_config:
retention_period: 30d # or 180d, etc.
```
Then restart Loki: `docker compose restart loki`
## Adding More Syslog Sources
Any device that can send syslog (UDP/TCP 514) will work automatically —
the `host` label will be set from the syslog hostname field, so you can
filter per-device in Grafana with `{host="my-device-hostname"}`.
---
## Integrating with the Vaultwarden Secrets Workflow
If you're using the `secrets-load.sh` script, store the Grafana admin
password as a custom field named `GF_SECURITY_ADMIN_PASSWORD` in a
Vaultwarden item called `docker/loki-stack`, then replace the hardcoded
value in `docker-compose.yml` with:
```yaml
env_file:
- .env
```

90
loki/alloy/config.alloy Normal file
View File

@@ -0,0 +1,90 @@
// Grafana Alloy pipeline.
// Inputs: (1) syslog over UDP/TCP 514 (MikroTik RB5009 and other network
// gear), (2) logs from every Docker container on this host.
// Output: the local Loki instance.

// ── 1. Syslog receiver ───────────────────────────────────────────────────────
// Point the MikroTik remote-logging action at this host, port 514.
loki.source.syslog "network_devices" {
  listener {
    address  = "0.0.0.0:514"
    protocol = "udp"
    labels   = {
      job    = "syslog",
      source = "network",
    }
  }
  listener {
    address  = "0.0.0.0:514"
    protocol = "tcp"
    labels   = {
      job    = "syslog",
      source = "network",
    }
  }
  // Entries flow into the label-promotion stage below.
  forward_to = [loki.process.syslog_relabel.receiver]
}

// Promote syslog metadata to queryable Loki labels, e.g. {host="RB5009"}.
// NOTE(review): stage.labels reads from the pipeline's extracted map; confirm
// the __syslog_message_* fields are visible here — the Alloy docs promote
// loki.source.syslog's internal labels via loki.relabel instead.
loki.process "syslog_relabel" {
  stage.labels {
    values = {
      host     = "__syslog_message_hostname",
      severity = "__syslog_message_severity",
      facility = "__syslog_message_facility",
      app      = "__syslog_message_app_name",
    }
  }
  forward_to = [loki.write.default.receiver]
}

// ── 2. Docker container logs ─────────────────────────────────────────────────
// Discovers every container on the host via the Docker socket.
discovery.docker "containers" {
  host = "unix:///var/run/docker.sock"
}

// Map Docker metadata onto Loki labels for easy filtering.
discovery.relabel "docker_labels" {
  targets = discovery.docker.containers.targets
  rule {
    // Docker prefixes container names with "/" — strip it.
    source_labels = ["__meta_docker_container_name"]
    regex         = "/(.*)"
    target_label  = "container"
  }
  rule {
    source_labels = ["__meta_docker_container_log_stream"]
    target_label  = "stream"
  }
  rule {
    source_labels = ["__meta_docker_image_name"]
    target_label  = "image"
  }
}

loki.source.docker "docker_logs" {
  host          = "unix:///var/run/docker.sock"
  targets       = discovery.relabel.docker_labels.output
  labels        = { job = "docker" }
  forward_to    = [loki.write.default.receiver]
  relabel_rules = discovery.relabel.docker_labels.rules
}

// ── 3. Loki write target ─────────────────────────────────────────────────────
// Every source above ultimately lands here.
loki.write "default" {
  endpoint {
    url = "http://loki:3100/loki/api/v1/push"
  }
}

73
loki/compose.yaml Normal file
View File

@@ -0,0 +1,73 @@
---
# Loki + Alloy + Grafana log aggregation stack.
# Run with: docker compose up -d
networks:
  logging:
    driver: bridge

volumes:
  loki-data:
  grafana-data:

services:
  # ── Loki: log storage and query engine ─────────────────────────────────────
  loki:
    image: grafana/loki:3.4.2
    container_name: loki
    restart: unless-stopped
    networks:
      - logging
    ports:
      - "3100:3100"  # Loki HTTP API (Alloy pushes here; Grafana queries here)
    volumes:
      - loki-data:/loki
      - ./config/loki.yml:/etc/loki/loki.yml:ro
    command: -config.file=/etc/loki/loki.yml
    healthcheck:
      # NOTE(review): assumes wget exists in the grafana/loki image — the image
      # is minimal, so verify the check actually passes before relying on it.
      test: ["CMD-SHELL", "wget -q --spider http://localhost:3100/ready || exit 1"]
      interval: 30s
      timeout: 5s
      retries: 5

  # ── Alloy: log collector / syslog receiver ──────────────────────────────────
  alloy:
    image: grafana/alloy:v1.7.5
    container_name: alloy
    restart: unless-stopped
    networks:
      - logging
    ports:
      - "514:514/udp"  # Syslog UDP (MikroTik and other network devices)
      - "514:514/tcp"  # Syslog TCP
      - "12345:12345"  # Alloy UI (optional, useful for debugging)
    volumes:
      - ./config/alloy.alloy:/etc/alloy/config.alloy:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro  # container log files
      - /var/run/docker.sock:/var/run/docker.sock:ro              # container metadata
    command: run /etc/alloy/config.alloy --server.http.listen-addr=0.0.0.0:12345
    depends_on:
      loki:
        condition: service_healthy

  # ── Grafana: log query UI ───────────────────────────────────────────────────
  grafana:
    image: grafana/grafana:11.5.2
    # NOTE(review): container_name "grafana" collides with the grafana service
    # in prometheus/compose.yaml if both stacks run on the same host.
    container_name: grafana
    restart: unless-stopped
    networks:
      - logging
    ports:
      - "3098:3000"
    volumes:
      - grafana-data:/var/lib/grafana
      - ./config/grafana-datasources.yml:/etc/grafana/provisioning/datasources/loki.yml:ro
    environment:
      - GF_AUTH_ANONYMOUS_ENABLED=true    # remove if you want login
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin  # remove if you want login
      - GF_SECURITY_ADMIN_PASSWORD=changeme  # change this (or load from .env)
    depends_on:
      loki:
        condition: service_healthy

89
loki/config/alloy.alloy Normal file
View File

@@ -0,0 +1,89 @@
// Grafana Alloy pipeline.
// Inputs: (1) syslog over UDP/TCP 514 (MikroTik RB5009 and other network
// gear), (2) logs from every Docker container on this host.
// Output: the local Loki instance.

// ── 1. Syslog receiver ───────────────────────────────────────────────────────
// Point the MikroTik remote-logging action at this host, port 514.
loki.source.syslog "network_devices" {
  listener {
    address  = "0.0.0.0:514"
    protocol = "udp"
    labels   = {
      job    = "syslog",
      source = "network",
    }
  }
  listener {
    address  = "0.0.0.0:514"
    protocol = "tcp"
    labels   = {
      job    = "syslog",
      source = "network",
    }
  }
  // loki.source.syslog parses RFC3164/RFC5424 messages and exposes hostname,
  // app, facility, and severity as internal __syslog_message_* fields; the
  // process stage below turns them into real Loki labels.
  forward_to = [loki.process.syslog_relabel.receiver]
}

// Promote syslog metadata to queryable Loki labels, e.g. {host="RB5009"}.
// NOTE(review): stage.labels reads from the pipeline's extracted map; confirm
// the __syslog_message_* fields are visible here — the Alloy docs promote
// loki.source.syslog's internal labels via loki.relabel instead.
loki.process "syslog_relabel" {
  stage.labels {
    values = {
      host     = "__syslog_message_hostname",
      severity = "__syslog_message_severity",
      facility = "__syslog_message_facility",
      app      = "__syslog_message_app_name",
    }
  }
  forward_to = [loki.write.default.receiver]
}

// ── 2. Docker container logs ─────────────────────────────────────────────────
// Discovers every container on the host via the Docker socket.
discovery.docker "containers" {
  host = "unix:///var/run/docker.sock"
}

// Map Docker metadata onto Loki labels for easy filtering.
discovery.relabel "docker_labels" {
  targets = discovery.docker.containers.targets
  rule {
    // Docker prefixes container names with "/" — strip it.
    source_labels = ["__meta_docker_container_name"]
    regex         = "/(.*)"
    target_label  = "container"
  }
  rule {
    source_labels = ["__meta_docker_container_log_stream"]
    target_label  = "stream"
  }
  rule {
    source_labels = ["__meta_docker_image_name"]
    target_label  = "image"
  }
}

loki.source.docker "docker_logs" {
  host       = "unix:///var/run/docker.sock"
  targets    = discovery.relabel.docker_labels.output
  labels     = { job = "docker" }
  forward_to = [loki.write.default.receiver]
}

// ── 3. Loki write target ─────────────────────────────────────────────────────
loki.write "default" {
  endpoint {
    url = "http://loki:3100/loki/api/v1/push"
  }
}

View File

@@ -0,0 +1,98 @@
// Alloy configuration.
// Collects: (1) Docker container logs, (2) syslog from network devices
// (MikroTik etc.), and pushes everything to the local Loki instance.

// ── Loki destination ─────────────────────────────────────────────────────────
loki.write "local_loki" {
  endpoint {
    url = "http://loki:3100/loki/api/v1/push"
  }
}

// ── Docker container log collection ──────────────────────────────────────────
// Discovers all running containers and tails their logs automatically;
// new containers are picked up without restarting Alloy.
discovery.docker "containers" {
  host = "unix:///var/run/docker.sock"
}

discovery.relabel "docker_labels" {
  targets = discovery.docker.containers.targets

  // Container name (Docker prefixes it with a slash — strip it).
  rule {
    source_labels = ["__meta_docker_container_name"]
    regex         = "/(.*)"
    target_label  = "container"
  }

  // Carry through the Docker Compose service name if present.
  rule {
    source_labels = ["__meta_docker_container_label_com_docker_compose_service"]
    target_label  = "service"
  }

  // Carry through the Docker Compose project name if present.
  rule {
    source_labels = ["__meta_docker_container_label_com_docker_compose_project"]
    target_label  = "compose_project"
  }

  rule {
    target_label = "source"
    replacement  = "docker"
  }
}

loki.source.docker "docker_logs" {
  host       = "unix:///var/run/docker.sock"
  targets    = discovery.relabel.docker_labels.output
  forward_to = [loki.write.local_loki.receiver]
  // FIX: loki.source.docker has no "relabeling" block; per-entry label
  // rewrites are supplied through the relabel_rules attribute instead.
  relabel_rules = discovery.relabel.docker_labels.rules
}

// ── Syslog receiver (MikroTik RB5009 and other network devices) ──────────────
// Listens on UDP 514 and TCP 514. On the RB5009, point the remote logging
// action at this host's IP.
loki.source.syslog "network_syslog" {
  listener {
    address  = "0.0.0.0:514"
    protocol = "udp"
    labels   = {
      source = "syslog",
      job    = "network_devices",
    }
  }
  listener {
    address  = "0.0.0.0:514"
    protocol = "tcp"
    labels   = {
      source = "syslog",
      job    = "network_devices",
    }
  }
  forward_to = [loki.process.syslog_relabel.receiver]
}

// Promote syslog metadata (hostname, app, severity, facility) to Loki labels.
// FIX: there is no "stage.syslog" pipeline stage — loki.source.syslog already
// parses RFC3164/RFC5424 and exposes the fields as __syslog_message_* values,
// matching the working config in loki/alloy/config.alloy.
loki.process "syslog_relabel" {
  stage.labels {
    values = {
      hostname = "__syslog_message_hostname",
      app      = "__syslog_message_app_name",
      severity = "__syslog_message_severity",
      facility = "__syslog_message_facility",
    }
  }
  forward_to = [loki.write.local_loki.receiver]
}

View File

@@ -0,0 +1,16 @@
---
# Grafana datasource provisioning.
# Automatically configures Loki as a datasource on first startup — no manual
# setup needed in the Grafana UI.
apiVersion: 1

datasources:
  - name: Loki
    type: loki
    access: proxy
    url: http://loki:3100  # Loki service name on the shared compose network
    isDefault: true
    editable: false
    jsonData:
      maxLines: 5000  # max log lines returned per query
      timeout: 60     # query timeout in seconds

56
loki/config/loki.yml Normal file
View File

@@ -0,0 +1,56 @@
---
# Loki configuration — single-binary mode, suitable for homelab scale.
# Docs: https://grafana.com/docs/loki/latest/configuration/
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096
  log_level: warn

common:
  instance_addr: 127.0.0.1
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

schema_config:
  configs:
    - from: "2024-01-01"  # quoted so generic YAML tooling keeps it a string
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

# ── Retention ─────────────────────────────────────────────────────────────────
# Adjust to suit your disk space. 90 days is a good starting point for
# homelab troubleshooting — long enough to catch recurring issues.
limits_config:
  retention_period: 90d
  ingestion_rate_mb: 4
  ingestion_burst_size_mb: 8

compactor:
  working_directory: /loki/compactor
  retention_enabled: true  # enables the retention_period above
  retention_delete_delay: 2h
  delete_request_store: filesystem

# ── Query performance ─────────────────────────────────────────────────────────
query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100

ruler:
  alertmanager_url: http://localhost:9093

View File

@@ -0,0 +1,17 @@
---
# Grafana datasource provisioning for Loki.
apiVersion: 1

datasources:
  - name: Loki
    type: loki
    access: proxy
    url: http://loki:3100
    isDefault: true
    editable: false
    jsonData:
      maxLines: 5000
      # Derived fields turn log content into clickable links.
      # This example matches trace IDs in log lines — remove if not needed.
      derivedFields:
        - name: TraceID
          matcherRegex: "traceID=(\\w+)"
          # NOTE(review): an empty url gives the matched field no link target —
          # point it at a tracing UI (e.g. Tempo) or drop this entry.
          url: ""

55
loki/loki/loki-config.yml Normal file
View File

@@ -0,0 +1,55 @@
---
# Loki configuration — single-binary mode, suitable for homelab scale.
# Stores data in the local filesystem via the 'loki-data' Docker volume.
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096
  log_level: warn

common:
  instance_addr: 127.0.0.1
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

# How long to keep logs. 90 days is generous but reasonable for a homelab —
# tune down if disk is tight.
limits_config:
  retention_period: 90d
  # Reject log lines larger than 256KB (protects against runaway logging).
  max_line_size: 256KB

schema_config:
  configs:
    - from: "2024-01-01"  # quoted so generic YAML tooling keeps it a string
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

compactor:
  working_directory: /loki/compactor
  retention_enabled: true  # enables the retention policy above
  retention_delete_delay: 2h
  delete_request_store: filesystem

query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100

ruler:
  alertmanager_url: http://localhost:9093

11
makemkv/compose.yaml Normal file
View File

@@ -0,0 +1,11 @@
---
# MakeMKV in a container (jlesage image) with the optical drive passed through.
services:
  makemkv:
    image: jlesage/makemkv
    ports:
      - "5801:5800"  # web-based UI
    volumes:
      - "/home/cjones/makemkv:/config:rw"
      - "/home/cjones/makemkv/storage:/storage:ro"
      - "/home/cjones/makemkv/output:/output:rw"
    devices:
      - "/dev/sr0:/dev/sr0"  # optical drive passthrough

View File

@@ -16,7 +16,7 @@ services:
app: app:
depends_on: depends_on:
- db - db
image: nextcloud:30 image: nextcloud:32
restart: always restart: always
ports: ports:
- 8080:80 - 8080:80

88
prometheus/compose.yaml Normal file
View File

@@ -0,0 +1,88 @@
---
# Monitoring stack: Prometheus + node-exporter + cAdvisor + Grafana +
# Alertmanager on a shared bridge network.
volumes:
  prometheus_data: {}
  grafana_data: {}

networks:
  monitoring:
    driver: bridge

services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    volumes:
      - ./prometheus:/etc/prometheus
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
      - '--web.enable-lifecycle'  # allows config reload via POST /-/reload
    ports:
      - "9098:9090"  # host 9098 → container 9090
    networks:
      - monitoring
    restart: unless-stopped

  node-exporter:
    image: prom/node-exporter:latest
    container_name: node-exporter
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      # FIX: --collector.filesystem.ignored-mount-points was deprecated in
      # node_exporter v1.0 in favor of mount-points-exclude; also use $$ so
      # Docker Compose does not try to interpolate the bare $ in the regex.
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
    ports:
      - "9100:9100"
    networks:
      - monitoring
    restart: unless-stopped

  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    container_name: cadvisor
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
      - /dev/disk/:/dev/disk:ro
    ports:
      # NOTE(review): host port 8080 is also published by the nextcloud stack
      # (8080:80) — these will conflict if both run on the same host.
      - "8080:8080"
    networks:
      - monitoring
    restart: unless-stopped

  grafana:
    image: grafana/grafana:latest
    # NOTE(review): container_name "grafana" collides with the grafana service
    # in loki/compose.yaml if both stacks run on the same host.
    container_name: grafana
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/provisioning:/etc/grafana/provisioning
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=admin  # change this (or load from .env)
      - GF_USERS_ALLOW_SIGN_UP=false
    ports:
      - "3000:3000"
    networks:
      - monitoring
    restart: unless-stopped

  alertmanager:
    image: prom/alertmanager:latest
    container_name: alertmanager
    volumes:
      - ./alertmanager:/etc/alertmanager
    command:
      - '--config.file=/etc/alertmanager/config.yml'
      - '--storage.path=/alertmanager'
    ports:
      - "9093:9093"
    networks:
      - monitoring
    restart: unless-stopped

13
prometheus/prometheus.yml Normal file
View File

@@ -0,0 +1,13 @@
---
global:
  scrape_interval: 15s

scrape_configs:
  # Prometheus self-scrape.
  # FIX: inside the container Prometheus listens on 9090; 9098 is only the
  # host-side published port, so localhost:9098 is unreachable from within.
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  # Docker Engine metrics. 172.17.0.1 is the default bridge gateway;
  # NOTE(review): requires "metrics-addr" enabled in /etc/docker/daemon.json —
  # confirm the daemon actually exposes :9323.
  - job_name: 'docker'
    static_configs:
      - targets: ['172.17.0.1:9323']