Compare commits

5 Commits

5 changed files with 486 additions and 550 deletions

@@ -1,6 +1,6 @@
 # Grafana Clickhouse Docker Log Aggregation
-This demo project can aggregate logs from Docker containers and store them in Clickhouse. In the `dashboards` directory you can find dashboard examples for specific log format. `Logs + Structured via Query` is recommended.
+This demo project can aggregate logs from Docker containers and store them in Clickhouse. In the `dashboards` directory you can find dashboard examples for specific log format. `Logs + Structured via Query` is recommended because the other one is deprecated now.
 **Note**: dashboards are tuned for structured logging (JSON log records).
@@ -8,8 +8,8 @@ This demo project can aggregate logs from Docker containers and store them in Cl
 Obviously, you'll need Docker:
-1. `make all`
-2. Navigate to `http://grafana.test` or `http://localhost:3000`, use `admin:admin` as credentials.
-3. Create your own dashboard or import those I provided (`dashboards/` directory). Replace data source UUID with correct one if needed.
+1. `make migrate` (use `clickhouse-client` inside container and execute migration SQL directly if migrator doesn't work).
+2. Navigate to `http://grafana.test` or `http://localhost:3000` (if you did not configure Traefik locally), use `admin:admin` as credentials.
+3. Import `dashboards/Logs + Structured via Query.json` dashboard.
 4. Run any Docker project with structured JSON logs.
 5. Done.
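
For step 3 above, the imported dashboard needs a ClickHouse data source to exist in Grafana first (the removed step 3 used to mention replacing the data source UUID). Below is a minimal, hypothetical sketch of provisioning that data source instead of creating it by hand; it is not part of this change set, and the plugin id `grafana-clickhouse-datasource`, the jsonData field names, and the `uid` value are assumptions.

# Hypothetical provisioning/datasources/clickhouse.yaml (not added by these commits)
apiVersion: 1
datasources:
  - name: ClickHouse
    uid: clickhouse-logs                    # assumed UID; the dashboard JSON references its data source by UID
    type: grafana-clickhouse-datasource     # assumed plugin id of the official ClickHouse plugin
    access: proxy
    jsonData:
      host: clickhouse                      # ClickHouse service name from compose.yml
      port: 8123
      protocol: http
      username: default
    secureJsonData:
      password: default                     # matches CLICKHOUSE_PASSWORD in compose.yml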

compose.yml (new file, 53 additions)

@@ -0,0 +1,53 @@
services:
  grafana:
    image: grafana/grafana:latest
    volumes:
      - grafana_data:/var/lib/grafana
    depends_on:
      - clickhouse
    ports:
      - 3000:3000
    labels:
      traefik.enable: "true"
      traefik.http.routers.grafana.entrypoints: web
      traefik.http.routers.grafana.rule: 'Host(`grafana.test`)'
      traefik.http.services.grafana.loadbalancer.server.port: 3000
  clickhouse:
    image: clickhouse/clickhouse-server:24
    environment:
      - CLICKHOUSE_DB=default
      - CLICKHOUSE_USER=default
      - CLICKHOUSE_PASSWORD=default
      - CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1
    ports:
      - '8123:8123'
    ulimits:
      nofile:
        soft: 262144
        hard: 262144
    cap_add:
      - NET_ADMIN
      - SYS_NICE
      - IPC_LOCK
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1
      interval: 10s
      timeout: 5s
      retries: 5
    volumes:
      - clickhouse_data:/var/lib/clickhouse
  vector:
    image: timberio/vector:latest-alpine
    ports:
      - '8686:8686'
    depends_on:
      - clickhouse
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./files/vector.yaml:/etc/vector/vector.yaml:ro
volumes:
  clickhouse_data:
    driver: local
  grafana_data:
    driver: local
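
The Traefik labels on the grafana service assume an external Traefik instance with a `web` entrypoint reachable from this compose project's network; the README's `http://grafana.test` only works once that proxy (and a matching hosts entry) is in place. A minimal sketch of such a companion service follows; it is not part of this change set, and the image tag, flags, and port are assumptions.

# Hypothetical reverse proxy expected by the grafana labels above (not added by these commits)
services:
  traefik:
    image: traefik:v3
    command:
      - --providers.docker=true
      - --providers.docker.exposedByDefault=false   # only route containers that set traefik.enable
      - --entryPoints.web.address=:80               # the `web` entrypoint referenced by the grafana router
    ports:
      - '80:80'
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro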

File diff suppressed because it is too large.

@@ -1,36 +0,0 @@
version: '3.8'
services:
  grafana:
    image: grafana/grafana:latest
    volumes:
      - grafana_data:/var/lib/grafana
    ports:
      - '3000:3000'
    labels:
      traefik.enable: "true"
      traefik.http.routers.grafana.entrypoints: web
      traefik.http.routers.grafana.rule: "Host(`grafana.test`)"
      traefik.http.services.grafana.loadbalancer.server.port: "3000"
  clickhouse:
    image: docker.io/bitnami/clickhouse:23
    environment:
      - CLICKHOUSE_ADMIN_USER=${CLICKHOUSE_USER}
      - CLICKHOUSE_ADMIN_PASSWORD=${CLICKHOUSE_PASSWORD}
    ports:
      - '${CLICKHOUSE_PORT}:8123'
    volumes:
      - clickhouse_data:/bitnami/clickhouse
  vector:
    image: timberio/vector:latest-alpine
    ports:
      - '8686:8686'
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./files/vector.yaml:/etc/vector/vector.yaml:ro
volumes:
  clickhouse_data:
    driver: local
  grafana_data:
    driver: local

@@ -1,6 +1,6 @@
 api:
   enabled: true
-  address: '0.0.0.0:8686'
+  address: 0.0.0.0:8686
 sources:
   docker:
     type: docker_logs
@@ -34,10 +34,10 @@ sinks:
     inputs:
       - dedupe
     compression: gzip
-    endpoint: 'http://clickhouse:8123'
+    endpoint: http://clickhouse:8123
     auth:
       strategy: basic
       user: default
       password: default
     database: default
     table: log_docker_raw
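
For orientation, the two hunks above come from `files/vector.yaml` (the file mounted into the vector container in compose.yml). A hedged reconstruction of the overall pipeline shape follows: a `docker_logs` source, a `dedupe` transform, and a ClickHouse sink writing to `log_docker_raw`. Only the lines visible in the diff are taken from the repository; the transform options and the sink name are assumptions.

# Sketch of the full vector.yaml shape; only diff-visible lines are confirmed
api:
  enabled: true
  address: 0.0.0.0:8686
sources:
  docker:
    type: docker_logs              # read container logs via the mounted Docker socket
transforms:
  dedupe:
    type: dedupe                   # drop duplicate log events before shipping
    inputs:
      - docker
sinks:
  clickhouse:                      # sink name is an assumption; the diff only shows its options
    type: clickhouse
    inputs:
      - dedupe
    compression: gzip
    endpoint: http://clickhouse:8123
    auth:
      strategy: basic
      user: default
      password: default
    database: default
    table: log_docker_raw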