deleted: docs/diagrams/architecture.mmd

deleted:    docs/diagrams/monitoring-coverage.mmd
	deleted:    docs/monitoring-coverage.md
	deleted:    docs/network.md
	deleted:    docs/prometheus-inventory.md
	deleted:    docs/runtime/prometheus-inventory.json
	deleted:    docs/runtime/prometheus-query-exposure.json
	deleted:    docs/runtime/prometheus-query-hypervisor.json
	deleted:    docs/runtime/prometheus-query-job-instance.json
	deleted:    docs/runtime/prometheus-query-jobs.json
	deleted:    docs/runtime/prometheus-query-network.json
	deleted:    docs/runtime/prometheus-query-role.json
	deleted:    docs/runtime/prometheus-query-service.json
	deleted:    docs/runtime/prometheus-query-up.json
	deleted:    docs/runtime/prometheus-targets.json
	deleted:    scripts/export_prometheus_inventory.py
	deleted:    scripts/render_prometheus_docs.py
This commit is contained in:
git
2026-04-13 17:00:53 +10:00
parent ba41ce7eb0
commit f8370b6a99
17 changed files with 0 additions and 2935 deletions
-17
View File
@@ -1,17 +0,0 @@
flowchart TB
Declared[Declared architecture\n(Compose + docs)]
Runtime[Observed runtime\n(Prometheus inventory)]
Declared --> Runtime
subgraph Monitoring["Prometheus observed jobs"]
job_container_updates["container-updates"]
job_kuma["kuma"]
job_node["node"]
job_pihole["pihole"]
job_prometheus["prometheus"]
job_proxmox_storage["proxmox-storage"]
job_telegraf["telegraf"]
job_traefik["traefik"]
end
Runtime --> Monitoring
-90
View File
@@ -1,90 +0,0 @@
flowchart LR
Prom[Prometheus]
classDef scrape stroke-dasharray: 5 5;
subgraph host_docker_update_exporter["Host: docker-update-exporter"]
container_updates_docker_update_exporter_9105["container-updates\ndocker-update-exporter:9105"]
end
subgraph host_kuma_lan_ddnsgeek_com["Host: kuma.lan.ddnsgeek.com"]
kuma_kuma_lan_ddnsgeek_com["kuma\nkuma.lan.ddnsgeek.com"]
end
subgraph host_monitor_kuma["Host: monitor-kuma"]
kuma_monitor_kuma_3001["kuma\nmonitor-kuma:3001"]
end
subgraph host_nix_cache["Host: nix-cache"]
node_nix_cache_9100["node\nnix-cache:9100"]
end
subgraph host_node_exporter["Host: node-exporter"]
node_node_exporter_9100["node\nnode-exporter:9100"]
end
subgraph host_pbs_sweet_home["Host: pbs.sweet.home"]
node_pbs_sweet_home_9100["node\npbs.sweet.home:9100"]
proxmox_storage_pbs_sweet_home_9102["proxmox-storage\npbs.sweet.home:9102"]
end
subgraph host_pihole["Host: pihole"]
node_pihole_9100["node\npihole:9100"]
end
subgraph host_pihole_exporter["Host: pihole-exporter"]
pihole_pihole_exporter_9617["pihole\npihole-exporter:9617"]
end
subgraph host_prometheus["Host: prometheus"]
prometheus_prometheus_9090["prometheus\nprometheus:9090"]
end
subgraph host_pve_sweet_home["Host: pve.sweet.home"]
node_pve_sweet_home_9100["node\npve.sweet.home:9100"]
proxmox_storage_pve_sweet_home_9101["proxmox-storage\npve.sweet.home:9101"]
end
subgraph host_raspberrypi_tail13f623_ts_net["Host: raspberrypi.tail13f623.ts.net"]
container_updates_raspberrypi_tail13f623_ts_net_9105["container-updates\nraspberrypi.tail13f623.ts.net:9105"]
node_raspberrypi_tail13f623_ts_net_9100["node\nraspberrypi.tail13f623.ts.net:9100"]
telegraf_raspberrypi_tail13f623_ts_net_9273["telegraf\nraspberrypi.tail13f623.ts.net:9273"]
traefik_raspberrypi_tail13f623_ts_net_8080["traefik\nraspberrypi.tail13f623.ts.net:8080"]
end
subgraph host_server["Host: server"]
node_server_9100["node\nserver:9100"]
end
subgraph host_telegraf["Host: telegraf"]
telegraf_telegraf_9273["telegraf\ntelegraf:9273"]
end
subgraph host_traefik_lan_ddnsgeek_com["Host: traefik.lan.ddnsgeek.com"]
traefik_traefik_lan_ddnsgeek_com_8080["traefik\ntraefik.lan.ddnsgeek.com:8080"]
end
Prom -. scrape .-> container_updates_docker_update_exporter_9105
class container_updates_docker_update_exporter_9105 scrape;
Prom -. scrape .-> container_updates_raspberrypi_tail13f623_ts_net_9105
class container_updates_raspberrypi_tail13f623_ts_net_9105 scrape;
Prom -. scrape .-> kuma_kuma_lan_ddnsgeek_com
class kuma_kuma_lan_ddnsgeek_com scrape;
Prom -. scrape .-> kuma_monitor_kuma_3001
class kuma_monitor_kuma_3001 scrape;
Prom -. scrape .-> node_nix_cache_9100
class node_nix_cache_9100 scrape;
Prom -. scrape .-> node_node_exporter_9100
class node_node_exporter_9100 scrape;
Prom -. scrape .-> node_pbs_sweet_home_9100
class node_pbs_sweet_home_9100 scrape;
Prom -. scrape .-> node_pihole_9100
class node_pihole_9100 scrape;
Prom -. scrape .-> node_pve_sweet_home_9100
class node_pve_sweet_home_9100 scrape;
Prom -. scrape .-> node_raspberrypi_tail13f623_ts_net_9100
class node_raspberrypi_tail13f623_ts_net_9100 scrape;
Prom -. scrape .-> node_server_9100
class node_server_9100 scrape;
Prom -. scrape .-> pihole_pihole_exporter_9617
class pihole_pihole_exporter_9617 scrape;
Prom -. scrape .-> prometheus_prometheus_9090
class prometheus_prometheus_9090 scrape;
Prom -. scrape .-> proxmox_storage_pbs_sweet_home_9102
class proxmox_storage_pbs_sweet_home_9102 scrape;
Prom -. scrape .-> proxmox_storage_pve_sweet_home_9101
class proxmox_storage_pve_sweet_home_9101 scrape;
Prom -. scrape .-> telegraf_raspberrypi_tail13f623_ts_net_9273
class telegraf_raspberrypi_tail13f623_ts_net_9273 scrape;
Prom -. scrape .-> telegraf_telegraf_9273
class telegraf_telegraf_9273 scrape;
Prom -. scrape .-> traefik_raspberrypi_tail13f623_ts_net_8080
class traefik_raspberrypi_tail13f623_ts_net_8080 scrape;
Prom -. scrape .-> traefik_traefik_lan_ddnsgeek_com_8080
class traefik_traefik_lan_ddnsgeek_com_8080 scrape;
-77
View File
@@ -1,77 +0,0 @@
# Monitoring Coverage
## Overview
This page is generated from Prometheus-observed runtime inventory. It supplements declared architecture docs and does not replace static source-of-truth configuration.
- Inventory timestamp: `2026-04-13T06:36:45Z`
- Prometheus URL: `http://prometheus:9090`
- Active scrape targets observed: `19`
- Unhealthy scrape targets observed: `0`
## Coverage by job
| job | active targets | unhealthy targets |
| --- | --- | --- |
| container-updates | 2 | 0 |
| kuma | 2 | 0 |
| node | 7 | 0 |
| pihole | 1 | 0 |
| prometheus | 1 | 0 |
| proxmox-storage | 2 | 0 |
| telegraf | 2 | 0 |
| traefik | 2 | 0 |
## Coverage by instance
| instance | jobs | health |
| --- | --- | --- |
| docker-update-exporter:9105 | container-updates | 1/1 up |
| kuma.lan.ddnsgeek.com | kuma | 1/1 up |
| monitor-kuma:3001 | kuma | 1/1 up |
| nix-cache:9100 | node | 1/1 up |
| node-exporter:9100 | node | 1/1 up |
| pbs.sweet.home:9100 | node | 1/1 up |
| pbs.sweet.home:9102 | proxmox-storage | 1/1 up |
| pihole-exporter:9617 | pihole | 1/1 up |
| pihole:9100 | node | 1/1 up |
| prometheus:9090 | prometheus | 1/1 up |
| pve.sweet.home:9100 | node | 1/1 up |
| pve.sweet.home:9101 | proxmox-storage | 1/1 up |
| raspberrypi.tail13f623.ts.net:8080 | traefik | 1/1 up |
| raspberrypi.tail13f623.ts.net:9100 | node | 1/1 up |
| raspberrypi.tail13f623.ts.net:9105 | container-updates | 1/1 up |
| raspberrypi.tail13f623.ts.net:9273 | telegraf | 1/1 up |
| server:9100 | node | 1/1 up |
| telegraf:9273 | telegraf | 1/1 up |
| traefik.lan.ddnsgeek.com:8080 | traefik | 1/1 up |
## Coverage by service
| service | instances | health |
| --- | --- | --- |
| unknown | docker-update-exporter:9105, kuma.lan.ddnsgeek.com, monitor-kuma:3001, nix-cache:9100, node-exporter:9100, pbs.sweet.home:9100, pbs.sweet.home:9102, pihole-exporter:9617, pihole:9100, prometheus:9090, pve.sweet.home:9100, pve.sweet.home:9101, raspberrypi.tail13f623.ts.net:8080, raspberrypi.tail13f623.ts.net:9100, raspberrypi.tail13f623.ts.net:9105, raspberrypi.tail13f623.ts.net:9273, server:9100, telegraf:9273, traefik.lan.ddnsgeek.com:8080 | 19/19 up |
## Unhealthy targets
| job | instance | scrape URL | health | last error |
| --- | --- | --- | --- | --- |
| none | | | | |
## Unknowns / missing metadata
| label | targets missing |
| --- | --- |
| exposure | 19 |
| hostname | 19 |
| hypervisor | 19 |
| network | 19 |
| service | 19 |
Unknown or missing metadata is treated as `unknown` in generated summaries to avoid over-claiming topology.
## Regeneration instructions
```bash
python3 scripts/render_prometheus_docs.py --inventory-file docs/runtime/prometheus-inventory.json
```
-52
View File
@@ -1,52 +0,0 @@
# Network and Exposure View (Prometheus Observed)
## Overview
This document is generated from Prometheus scrape metadata and endpoint URLs. It is an observed monitoring view and not a physical network map.
- Inventory timestamp: `2026-04-13T06:36:45Z`
- Physical topology, VLAN mapping, and bridge membership remain unknown unless explicitly documented elsewhere.
## Observed scrape endpoints
| job | instance | scrape URL | network label | exposure label |
| --- | --- | --- | --- | --- |
| container-updates | docker-update-exporter:9105 | http://docker-update-exporter:9105/metrics | unknown | unknown |
| container-updates | raspberrypi.tail13f623.ts.net:9105 | http://raspberrypi.tail13f623.ts.net:9105/metrics | unknown | unknown |
| kuma | kuma.lan.ddnsgeek.com | http://kuma.lan.ddnsgeek.com/metrics | unknown | unknown |
| kuma | monitor-kuma:3001 | http://monitor-kuma:3001/metrics | unknown | unknown |
| node | nix-cache:9100 | http://nix-cache:9100/metrics | unknown | unknown |
| node | node-exporter:9100 | http://node-exporter:9100/metrics | unknown | unknown |
| node | pbs.sweet.home:9100 | http://pbs.sweet.home:9100/metrics | unknown | unknown |
| node | pihole:9100 | http://pihole:9100/metrics | unknown | unknown |
| node | pve.sweet.home:9100 | http://pve.sweet.home:9100/metrics | unknown | unknown |
| node | raspberrypi.tail13f623.ts.net:9100 | http://raspberrypi.tail13f623.ts.net:9100/metrics | unknown | unknown |
| node | server:9100 | http://server:9100/metrics | unknown | unknown |
| pihole | pihole-exporter:9617 | http://pihole-exporter:9617/metrics | unknown | unknown |
| prometheus | prometheus:9090 | http://prometheus:9090/metrics | unknown | unknown |
| proxmox-storage | pbs.sweet.home:9102 | http://pbs.sweet.home:9102/metrics | unknown | unknown |
| proxmox-storage | pve.sweet.home:9101 | http://pve.sweet.home:9101/metrics | unknown | unknown |
| telegraf | raspberrypi.tail13f623.ts.net:9273 | http://raspberrypi.tail13f623.ts.net:9273/metrics | unknown | unknown |
| telegraf | telegraf:9273 | http://telegraf:9273/metrics | unknown | unknown |
| traefik | raspberrypi.tail13f623.ts.net:8080 | http://raspberrypi.tail13f623.ts.net:8080/metrics | unknown | unknown |
| traefik | traefik.lan.ddnsgeek.com:8080 | http://traefik.lan.ddnsgeek.com:8080/metrics | unknown | unknown |
## Internal vs public indicators
| exposure label | targets |
| --- | --- |
| unknown | 19 |
All indicators above are label-derived. Missing labels are rendered as `unknown`.
## Monitoring paths
| metrics path | observed targets |
| --- | --- |
| /metrics | 19 |
## Unknowns and limits
- Prometheus can confirm scrape reachability but not ownership or placement boundaries.
- No VLAN, switch, or hypervisor placement is inferred unless present in inventory labels.
- Treat this as runtime evidence to pair with declared architecture docs.
-125
View File
@@ -1,125 +0,0 @@
# Prometheus Runtime Inventory
## 1. Overview
- Generated at: `2026-04-13T06:36:45Z`
- Prometheus URL: `http://prometheus:9090`
- Total active targets: `19`
- Unhealthy targets: `0`
- Source type: Observed runtime telemetry (not sole source of truth).
## 2. Scrape jobs
| Job | Observed target count |
| --- | --- |
| container-updates | 2 |
| kuma | 2 |
| node | 7 |
| pihole | 1 |
| prometheus | 1 |
| proxmox-storage | 2 |
| telegraf | 2 |
| traefik | 2 |
## 3. Active targets
| Job | Instance | Health | Scrape URL |
| --- | --- | --- | --- |
| container-updates | docker-update-exporter:9105 | up | http://docker-update-exporter:9105/metrics |
| container-updates | raspberrypi.tail13f623.ts.net:9105 | up | http://raspberrypi.tail13f623.ts.net:9105/metrics |
| kuma | kuma.lan.ddnsgeek.com | up | http://kuma.lan.ddnsgeek.com/metrics |
| kuma | monitor-kuma:3001 | up | http://monitor-kuma:3001/metrics |
| node | nix-cache:9100 | up | http://nix-cache:9100/metrics |
| node | node-exporter:9100 | up | http://node-exporter:9100/metrics |
| node | pbs.sweet.home:9100 | up | http://pbs.sweet.home:9100/metrics |
| node | pihole:9100 | up | http://pihole:9100/metrics |
| node | pve.sweet.home:9100 | up | http://pve.sweet.home:9100/metrics |
| node | raspberrypi.tail13f623.ts.net:9100 | up | http://raspberrypi.tail13f623.ts.net:9100/metrics |
| node | server:9100 | up | http://server:9100/metrics |
| pihole | pihole-exporter:9617 | up | http://pihole-exporter:9617/metrics |
| prometheus | prometheus:9090 | up | http://prometheus:9090/metrics |
| proxmox-storage | pbs.sweet.home:9102 | up | http://pbs.sweet.home:9102/metrics |
| proxmox-storage | pve.sweet.home:9101 | up | http://pve.sweet.home:9101/metrics |
| telegraf | raspberrypi.tail13f623.ts.net:9273 | up | http://raspberrypi.tail13f623.ts.net:9273/metrics |
| telegraf | telegraf:9273 | up | http://telegraf:9273/metrics |
| traefik | raspberrypi.tail13f623.ts.net:8080 | up | http://raspberrypi.tail13f623.ts.net:8080/metrics |
| traefik | traefik.lan.ddnsgeek.com:8080 | up | http://traefik.lan.ddnsgeek.com:8080/metrics |
## 4. Unhealthy targets
| Job | Instance | Health | Last error |
| --- | --- | --- | --- |
| None | | | |
## 5. Hosts / instances observed
| Job | Instance | Series count |
| --- | --- | --- |
| container-updates | docker-update-exporter:9105 | 1 |
| container-updates | raspberrypi.tail13f623.ts.net:9105 | 1 |
| kuma | kuma.lan.ddnsgeek.com | 1 |
| kuma | monitor-kuma:3001 | 1 |
| node | nix-cache:9100 | 1 |
| node | node-exporter:9100 | 1 |
| node | pbs.sweet.home:9100 | 1 |
| node | pihole:9100 | 1 |
| node | pve.sweet.home:9100 | 1 |
| node | raspberrypi.tail13f623.ts.net:9100 | 1 |
| node | server:9100 | 1 |
| pihole | pihole-exporter:9617 | 1 |
| prometheus | prometheus:9090 | 1 |
| proxmox-storage | pbs.sweet.home:9102 | 1 |
| proxmox-storage | pve.sweet.home:9101 | 1 |
| telegraf | raspberrypi.tail13f623.ts.net:9273 | 1 |
| telegraf | telegraf:9273 | 1 |
| traefik | raspberrypi.tail13f623.ts.net:8080 | 1 |
| traefik | traefik.lan.ddnsgeek.com:8080 | 1 |
## 6. Services observed
| Service | Series count |
| --- | --- |
| <missing> | 19 |
## 7. Network / exposure metadata observed
| Category | Label | Series count |
| --- | --- | --- |
| network | <missing> | 19 |
| exposure | <missing> | 19 |
| role | backup | 2 |
| role | cache | 1 |
| role | docker | 6 |
| role | pihole | 2 |
| role | prometheus | 1 |
| role | proxmox | 2 |
| role | raspberrypi | 4 |
| role | server | 1 |
| hypervisor | <missing> | 19 |
## 8. Unknowns / missing metadata
| Missing label | Targets missing |
| --- | --- |
| exposure | 19 |
| hostname | 19 |
| hypervisor | 19 |
| network | 19 |
| service | 19 |
Notes:
- Prometheus runtime data is observational and not authoritative for placement/topology.
- Do not infer Proxmox host placement, VM placement, VLAN layout, or public/internal boundaries without explicit labels or additional inventory sources.
## 9. Regeneration instructions
```bash
export PROMETHEUS_URL="https://prometheus.example.com"
# Optional auth:
# export PROMETHEUS_BEARER_TOKEN="..."
# or export PROMETHEUS_USERNAME="..."; export PROMETHEUS_PASSWORD="..."
python3 scripts/export_prometheus_inventory.py --output-dir docs/runtime
```
This inventory feeds documentation and diagram workflows as an observed-runtime input alongside static repo configuration.
-519
View File
@@ -1,519 +0,0 @@
{
"exposures": {
"<missing>": 19
},
"generated_at": "2026-04-13T06:36:45Z",
"hypervisors": {
"<missing>": 19
},
"instances": {
"container-updates": {
"docker-update-exporter:9105": 1,
"raspberrypi.tail13f623.ts.net:9105": 1
},
"kuma": {
"kuma.lan.ddnsgeek.com": 1,
"monitor-kuma:3001": 1
},
"node": {
"nix-cache:9100": 1,
"node-exporter:9100": 1,
"pbs.sweet.home:9100": 1,
"pihole:9100": 1,
"pve.sweet.home:9100": 1,
"raspberrypi.tail13f623.ts.net:9100": 1,
"server:9100": 1
},
"pihole": {
"pihole-exporter:9617": 1
},
"prometheus": {
"prometheus:9090": 1
},
"proxmox-storage": {
"pbs.sweet.home:9102": 1,
"pve.sweet.home:9101": 1
},
"telegraf": {
"raspberrypi.tail13f623.ts.net:9273": 1,
"telegraf:9273": 1
},
"traefik": {
"raspberrypi.tail13f623.ts.net:8080": 1,
"traefik.lan.ddnsgeek.com:8080": 1
}
},
"jobs": {
"container-updates": 2,
"kuma": 2,
"node": 7,
"pihole": 1,
"prometheus": 1,
"proxmox-storage": 2,
"telegraf": 2,
"traefik": 2
},
"networks": {
"<missing>": 19
},
"notes": [
"The `up` query indicates scrape success from Prometheus perspective only.",
"Use static repository architecture docs and deployment configs with this runtime export for complete diagrams."
],
"prometheus_url": "http://prometheus:9090",
"query_observations": {
"job_count": 8,
"up_series_count": 19
},
"roles": {
"backup": 2,
"cache": 1,
"docker": 6,
"pihole": 2,
"prometheus": 1,
"proxmox": 2,
"raspberrypi": 4,
"server": 1
},
"services": {
"<missing>": 19
},
"targets": [
{
"discovered_labels": {
"__address__": "docker-update-exporter:9105",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "container-updates",
"role": "docker"
},
"health": "up",
"instance": "docker-update-exporter:9105",
"job": "container-updates",
"labels": {
"instance": "docker-update-exporter:9105",
"job": "container-updates",
"role": "docker"
},
"last_error": "",
"scrape_pool": "container-updates",
"scrape_url": "http://docker-update-exporter:9105/metrics"
},
{
"discovered_labels": {
"__address__": "raspberrypi.tail13f623.ts.net:9105",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "container-updates",
"role": "raspberrypi"
},
"health": "up",
"instance": "raspberrypi.tail13f623.ts.net:9105",
"job": "container-updates",
"labels": {
"instance": "raspberrypi.tail13f623.ts.net:9105",
"job": "container-updates",
"role": "raspberrypi"
},
"last_error": "",
"scrape_pool": "container-updates",
"scrape_url": "http://raspberrypi.tail13f623.ts.net:9105/metrics"
},
{
"discovered_labels": {
"__address__": "kuma.lan.ddnsgeek.com",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "30s",
"__scrape_timeout__": "10s",
"job": "kuma",
"role": "raspberrypi"
},
"health": "up",
"instance": "kuma.lan.ddnsgeek.com",
"job": "kuma",
"labels": {
"instance": "kuma.lan.ddnsgeek.com",
"job": "kuma",
"role": "raspberrypi"
},
"last_error": "",
"scrape_pool": "kuma",
"scrape_url": "http://kuma.lan.ddnsgeek.com/metrics"
},
{
"discovered_labels": {
"__address__": "monitor-kuma:3001",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "30s",
"__scrape_timeout__": "10s",
"job": "kuma",
"role": "docker"
},
"health": "up",
"instance": "monitor-kuma:3001",
"job": "kuma",
"labels": {
"instance": "monitor-kuma:3001",
"job": "kuma",
"role": "docker"
},
"last_error": "",
"scrape_pool": "kuma",
"scrape_url": "http://monitor-kuma:3001/metrics"
},
{
"discovered_labels": {
"__address__": "nix-cache:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "cache"
},
"health": "up",
"instance": "nix-cache:9100",
"job": "node",
"labels": {
"instance": "nix-cache:9100",
"job": "node",
"role": "cache"
},
"last_error": "",
"scrape_pool": "node",
"scrape_url": "http://nix-cache:9100/metrics"
},
{
"discovered_labels": {
"__address__": "node-exporter:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "docker"
},
"health": "up",
"instance": "node-exporter:9100",
"job": "node",
"labels": {
"instance": "node-exporter:9100",
"job": "node",
"role": "docker"
},
"last_error": "",
"scrape_pool": "node",
"scrape_url": "http://node-exporter:9100/metrics"
},
{
"discovered_labels": {
"__address__": "pbs.sweet.home:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "backup"
},
"health": "up",
"instance": "pbs.sweet.home:9100",
"job": "node",
"labels": {
"instance": "pbs.sweet.home:9100",
"job": "node",
"role": "backup"
},
"last_error": "",
"scrape_pool": "node",
"scrape_url": "http://pbs.sweet.home:9100/metrics"
},
{
"discovered_labels": {
"__address__": "pihole:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "pihole"
},
"health": "up",
"instance": "pihole:9100",
"job": "node",
"labels": {
"instance": "pihole:9100",
"job": "node",
"role": "pihole"
},
"last_error": "",
"scrape_pool": "node",
"scrape_url": "http://pihole:9100/metrics"
},
{
"discovered_labels": {
"__address__": "pve.sweet.home:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "proxmox"
},
"health": "up",
"instance": "pve.sweet.home:9100",
"job": "node",
"labels": {
"instance": "pve.sweet.home:9100",
"job": "node",
"role": "proxmox"
},
"last_error": "",
"scrape_pool": "node",
"scrape_url": "http://pve.sweet.home:9100/metrics"
},
{
"discovered_labels": {
"__address__": "raspberrypi.tail13f623.ts.net:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "raspberrypi"
},
"health": "up",
"instance": "raspberrypi.tail13f623.ts.net:9100",
"job": "node",
"labels": {
"instance": "raspberrypi.tail13f623.ts.net:9100",
"job": "node",
"role": "raspberrypi"
},
"last_error": "",
"scrape_pool": "node",
"scrape_url": "http://raspberrypi.tail13f623.ts.net:9100/metrics"
},
{
"discovered_labels": {
"__address__": "server:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "server"
},
"health": "up",
"instance": "server:9100",
"job": "node",
"labels": {
"instance": "server:9100",
"job": "node",
"role": "server"
},
"last_error": "",
"scrape_pool": "node",
"scrape_url": "http://server:9100/metrics"
},
{
"discovered_labels": {
"__address__": "pihole-exporter:9617",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "pihole",
"role": "pihole"
},
"health": "up",
"instance": "pihole-exporter:9617",
"job": "pihole",
"labels": {
"instance": "pihole-exporter:9617",
"job": "pihole",
"role": "pihole"
},
"last_error": "",
"scrape_pool": "pihole",
"scrape_url": "http://pihole-exporter:9617/metrics"
},
{
"discovered_labels": {
"__address__": "prometheus:9090",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "prometheus",
"role": "prometheus"
},
"health": "up",
"instance": "prometheus:9090",
"job": "prometheus",
"labels": {
"instance": "prometheus:9090",
"job": "prometheus",
"role": "prometheus"
},
"last_error": "",
"scrape_pool": "prometheus",
"scrape_url": "http://prometheus:9090/metrics"
},
{
"discovered_labels": {
"__address__": "pbs.sweet.home:9102",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "proxmox-storage",
"role": "backup",
"storage": "datastore"
},
"health": "up",
"instance": "pbs.sweet.home:9102",
"job": "proxmox-storage",
"labels": {
"instance": "pbs.sweet.home:9102",
"job": "proxmox-storage",
"role": "backup",
"storage": "datastore"
},
"last_error": "",
"scrape_pool": "proxmox-storage",
"scrape_url": "http://pbs.sweet.home:9102/metrics"
},
{
"discovered_labels": {
"__address__": "pve.sweet.home:9101",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "proxmox-storage",
"role": "proxmox",
"storage": "lvm"
},
"health": "up",
"instance": "pve.sweet.home:9101",
"job": "proxmox-storage",
"labels": {
"instance": "pve.sweet.home:9101",
"job": "proxmox-storage",
"role": "proxmox",
"storage": "lvm"
},
"last_error": "",
"scrape_pool": "proxmox-storage",
"scrape_url": "http://pve.sweet.home:9101/metrics"
},
{
"discovered_labels": {
"__address__": "raspberrypi.tail13f623.ts.net:9273",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "telegraf",
"role": "docker"
},
"health": "up",
"instance": "raspberrypi.tail13f623.ts.net:9273",
"job": "telegraf",
"labels": {
"instance": "raspberrypi.tail13f623.ts.net:9273",
"job": "telegraf",
"role": "docker"
},
"last_error": "",
"scrape_pool": "telegraf",
"scrape_url": "http://raspberrypi.tail13f623.ts.net:9273/metrics"
},
{
"discovered_labels": {
"__address__": "telegraf:9273",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "telegraf",
"role": "docker"
},
"health": "up",
"instance": "telegraf:9273",
"job": "telegraf",
"labels": {
"instance": "telegraf:9273",
"job": "telegraf",
"role": "docker"
},
"last_error": "",
"scrape_pool": "telegraf",
"scrape_url": "http://telegraf:9273/metrics"
},
{
"discovered_labels": {
"__address__": "raspberrypi.tail13f623.ts.net:8080",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "traefik",
"role": "raspberrypi"
},
"health": "up",
"instance": "raspberrypi.tail13f623.ts.net:8080",
"job": "traefik",
"labels": {
"instance": "raspberrypi.tail13f623.ts.net:8080",
"job": "traefik",
"role": "raspberrypi"
},
"last_error": "",
"scrape_pool": "traefik",
"scrape_url": "http://raspberrypi.tail13f623.ts.net:8080/metrics"
},
{
"discovered_labels": {
"__address__": "traefik.lan.ddnsgeek.com:8080",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "traefik",
"role": "docker"
},
"health": "up",
"instance": "traefik.lan.ddnsgeek.com:8080",
"job": "traefik",
"labels": {
"instance": "traefik.lan.ddnsgeek.com:8080",
"job": "traefik",
"role": "docker"
},
"last_error": "",
"scrape_pool": "traefik",
"scrape_url": "http://traefik.lan.ddnsgeek.com:8080/metrics"
}
],
"unhealthy_targets": [],
"unknowns": {
"missing_label_counts": {
"exposure": 19,
"hostname": 19,
"hypervisor": 19,
"network": 19,
"service": 19
},
"notes": [
"Prometheus runtime data is observational and not authoritative for placement/topology.",
"Do not infer Proxmox host placement, VM placement, VLAN layout, or public/internal boundaries without explicit labels or additional inventory sources."
]
}
}
@@ -1,15 +0,0 @@
{
"data": {
"result": [
{
"metric": {},
"value": [
1776062205.716,
"19"
]
}
],
"resultType": "vector"
},
"status": "success"
}
@@ -1,15 +0,0 @@
{
"data": {
"result": [
{
"metric": {},
"value": [
1776062205.714,
"19"
]
}
],
"resultType": "vector"
},
"status": "success"
}
@@ -1,198 +0,0 @@
{
"data": {
"result": [
{
"metric": {
"instance": "pbs.sweet.home:9100",
"job": "node"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "raspberrypi.tail13f623.ts.net:8080",
"job": "traefik"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "traefik.lan.ddnsgeek.com:8080",
"job": "traefik"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "pve.sweet.home:9101",
"job": "proxmox-storage"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "raspberrypi.tail13f623.ts.net:9273",
"job": "telegraf"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "docker-update-exporter:9105",
"job": "container-updates"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "pihole:9100",
"job": "node"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "pihole-exporter:9617",
"job": "pihole"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "raspberrypi.tail13f623.ts.net:9105",
"job": "container-updates"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "pve.sweet.home:9100",
"job": "node"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "prometheus:9090",
"job": "prometheus"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "telegraf:9273",
"job": "telegraf"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "raspberrypi.tail13f623.ts.net:9100",
"job": "node"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "node-exporter:9100",
"job": "node"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "pbs.sweet.home:9102",
"job": "proxmox-storage"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "nix-cache:9100",
"job": "node"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "server:9100",
"job": "node"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "kuma.lan.ddnsgeek.com",
"job": "kuma"
},
"value": [
1776062205.712,
"1"
]
},
{
"metric": {
"instance": "monitor-kuma:3001",
"job": "kuma"
},
"value": [
1776062205.712,
"1"
]
}
],
"resultType": "vector"
},
"status": "success"
}
-80
View File
@@ -1,80 +0,0 @@
{
"data": {
"result": [
{
"metric": {
"job": "node"
},
"value": [
1776062205.711,
"7"
]
},
{
"metric": {
"job": "traefik"
},
"value": [
1776062205.711,
"2"
]
},
{
"metric": {
"job": "proxmox-storage"
},
"value": [
1776062205.711,
"2"
]
},
{
"metric": {
"job": "telegraf"
},
"value": [
1776062205.711,
"2"
]
},
{
"metric": {
"job": "container-updates"
},
"value": [
1776062205.711,
"2"
]
},
{
"metric": {
"job": "pihole"
},
"value": [
1776062205.711,
"1"
]
},
{
"metric": {
"job": "prometheus"
},
"value": [
1776062205.711,
"1"
]
},
{
"metric": {
"job": "kuma"
},
"value": [
1776062205.711,
"2"
]
}
],
"resultType": "vector"
},
"status": "success"
}
@@ -1,15 +0,0 @@
{
"data": {
"result": [
{
"metric": {},
"value": [
1776062205.715,
"19"
]
}
],
"resultType": "vector"
},
"status": "success"
}
-80
View File
@@ -1,80 +0,0 @@
{
"data": {
"result": [
{
"metric": {
"role": "backup"
},
"value": [
1776062205.714,
"2"
]
},
{
"metric": {
"role": "raspberrypi"
},
"value": [
1776062205.714,
"4"
]
},
{
"metric": {
"role": "docker"
},
"value": [
1776062205.714,
"6"
]
},
{
"metric": {
"role": "proxmox"
},
"value": [
1776062205.714,
"2"
]
},
{
"metric": {
"role": "pihole"
},
"value": [
1776062205.714,
"2"
]
},
{
"metric": {
"role": "prometheus"
},
"value": [
1776062205.714,
"1"
]
},
{
"metric": {
"role": "cache"
},
"value": [
1776062205.714,
"1"
]
},
{
"metric": {
"role": "server"
},
"value": [
1776062205.714,
"1"
]
}
],
"resultType": "vector"
},
"status": "success"
}
@@ -1,15 +0,0 @@
{
"data": {
"result": [
{
"metric": {},
"value": [
1776062205.713,
"19"
]
}
],
"resultType": "vector"
},
"status": "success"
}
-238
View File
@@ -1,238 +0,0 @@
{
"data": {
"result": [
{
"metric": {
"__name__": "up",
"instance": "pbs.sweet.home:9100",
"job": "node",
"role": "backup"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "raspberrypi.tail13f623.ts.net:8080",
"job": "traefik",
"role": "raspberrypi"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "traefik.lan.ddnsgeek.com:8080",
"job": "traefik",
"role": "docker"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "pve.sweet.home:9101",
"job": "proxmox-storage",
"role": "proxmox",
"storage": "lvm"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "raspberrypi.tail13f623.ts.net:9273",
"job": "telegraf",
"role": "docker"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "docker-update-exporter:9105",
"job": "container-updates",
"role": "docker"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "pihole:9100",
"job": "node",
"role": "pihole"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "pihole-exporter:9617",
"job": "pihole",
"role": "pihole"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "raspberrypi.tail13f623.ts.net:9105",
"job": "container-updates",
"role": "raspberrypi"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "pve.sweet.home:9100",
"job": "node",
"role": "proxmox"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "prometheus:9090",
"job": "prometheus",
"role": "prometheus"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "telegraf:9273",
"job": "telegraf",
"role": "docker"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "raspberrypi.tail13f623.ts.net:9100",
"job": "node",
"role": "raspberrypi"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "node-exporter:9100",
"job": "node",
"role": "docker"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "pbs.sweet.home:9102",
"job": "proxmox-storage",
"role": "backup",
"storage": "datastore"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "nix-cache:9100",
"job": "node",
"role": "cache"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "server:9100",
"job": "node",
"role": "server"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "kuma.lan.ddnsgeek.com",
"job": "kuma",
"role": "raspberrypi"
},
"value": [
1776062205.71,
"1"
]
},
{
"metric": {
"__name__": "up",
"instance": "monitor-kuma:3001",
"job": "kuma",
"role": "docker"
},
"value": [
1776062205.71,
"1"
]
}
],
"resultType": "vector"
},
"status": "success"
}
-497
View File
@@ -1,497 +0,0 @@
{
"data": {
"activeTargets": [
{
"discoveredLabels": {
"__address__": "raspberrypi.tail13f623.ts.net:9105",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "container-updates",
"role": "raspberrypi"
},
"globalUrl": "http://raspberrypi.tail13f623.ts.net:9105/metrics",
"health": "up",
"labels": {
"instance": "raspberrypi.tail13f623.ts.net:9105",
"job": "container-updates",
"role": "raspberrypi"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:38.412018212Z",
"lastScrapeDuration": 0.119818002,
"scrapeInterval": "15s",
"scrapePool": "container-updates",
"scrapeTimeout": "10s",
"scrapeUrl": "http://raspberrypi.tail13f623.ts.net:9105/metrics"
},
{
"discoveredLabels": {
"__address__": "docker-update-exporter:9105",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "container-updates",
"role": "docker"
},
"globalUrl": "http://docker-update-exporter:9105/metrics",
"health": "up",
"labels": {
"instance": "docker-update-exporter:9105",
"job": "container-updates",
"role": "docker"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:35.654237456Z",
"lastScrapeDuration": 0.002254582,
"scrapeInterval": "15s",
"scrapePool": "container-updates",
"scrapeTimeout": "10s",
"scrapeUrl": "http://docker-update-exporter:9105/metrics"
},
{
"discoveredLabels": {
"__address__": "monitor-kuma:3001",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "30s",
"__scrape_timeout__": "10s",
"job": "kuma",
"role": "docker"
},
"globalUrl": "http://monitor-kuma:3001/metrics",
"health": "up",
"labels": {
"instance": "monitor-kuma:3001",
"job": "kuma",
"role": "docker"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:27.982541752Z",
"lastScrapeDuration": 0.069706065,
"scrapeInterval": "30s",
"scrapePool": "kuma",
"scrapeTimeout": "10s",
"scrapeUrl": "http://monitor-kuma:3001/metrics"
},
{
"discoveredLabels": {
"__address__": "kuma.lan.ddnsgeek.com",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "30s",
"__scrape_timeout__": "10s",
"job": "kuma",
"role": "raspberrypi"
},
"globalUrl": "http://kuma.lan.ddnsgeek.com/metrics",
"health": "up",
"labels": {
"instance": "kuma.lan.ddnsgeek.com",
"job": "kuma",
"role": "raspberrypi"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:39.961722007Z",
"lastScrapeDuration": 1.470526665,
"scrapeInterval": "30s",
"scrapePool": "kuma",
"scrapeTimeout": "10s",
"scrapeUrl": "http://kuma.lan.ddnsgeek.com/metrics"
},
{
"discoveredLabels": {
"__address__": "pve.sweet.home:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "proxmox"
},
"globalUrl": "http://pve.sweet.home:9100/metrics",
"health": "up",
"labels": {
"instance": "pve.sweet.home:9100",
"job": "node",
"role": "proxmox"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:45.136284048Z",
"lastScrapeDuration": 0.077552327,
"scrapeInterval": "15s",
"scrapePool": "node",
"scrapeTimeout": "10s",
"scrapeUrl": "http://pve.sweet.home:9100/metrics"
},
{
"discoveredLabels": {
"__address__": "pbs.sweet.home:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "backup"
},
"globalUrl": "http://pbs.sweet.home:9100/metrics",
"health": "up",
"labels": {
"instance": "pbs.sweet.home:9100",
"job": "node",
"role": "backup"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:43.953992414Z",
"lastScrapeDuration": 0.042913089,
"scrapeInterval": "15s",
"scrapePool": "node",
"scrapeTimeout": "10s",
"scrapeUrl": "http://pbs.sweet.home:9100/metrics"
},
{
"discoveredLabels": {
"__address__": "pihole:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "pihole"
},
"globalUrl": "http://pihole:9100/metrics",
"health": "up",
"labels": {
"instance": "pihole:9100",
"job": "node",
"role": "pihole"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:42.158765958Z",
"lastScrapeDuration": 0.027103133,
"scrapeInterval": "15s",
"scrapePool": "node",
"scrapeTimeout": "10s",
"scrapeUrl": "http://pihole:9100/metrics"
},
{
"discoveredLabels": {
"__address__": "server:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "server"
},
"globalUrl": "http://server:9100/metrics",
"health": "up",
"labels": {
"instance": "server:9100",
"job": "node",
"role": "server"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:31.985355223Z",
"lastScrapeDuration": 0.010159325,
"scrapeInterval": "15s",
"scrapePool": "node",
"scrapeTimeout": "10s",
"scrapeUrl": "http://server:9100/metrics"
},
{
"discoveredLabels": {
"__address__": "nix-cache:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "cache"
},
"globalUrl": "http://nix-cache:9100/metrics",
"health": "up",
"labels": {
"instance": "nix-cache:9100",
"job": "node",
"role": "cache"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:32.054207846Z",
"lastScrapeDuration": 0.011150377,
"scrapeInterval": "15s",
"scrapePool": "node",
"scrapeTimeout": "10s",
"scrapeUrl": "http://nix-cache:9100/metrics"
},
{
"discoveredLabels": {
"__address__": "node-exporter:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "docker"
},
"globalUrl": "http://node-exporter:9100/metrics",
"health": "up",
"labels": {
"instance": "node-exporter:9100",
"job": "node",
"role": "docker"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:39.839919677Z",
"lastScrapeDuration": 0.076223114,
"scrapeInterval": "15s",
"scrapePool": "node",
"scrapeTimeout": "10s",
"scrapeUrl": "http://node-exporter:9100/metrics"
},
{
"discoveredLabels": {
"__address__": "raspberrypi.tail13f623.ts.net:9100",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "node",
"role": "raspberrypi"
},
"globalUrl": "http://raspberrypi.tail13f623.ts.net:9100/metrics",
"health": "up",
"labels": {
"instance": "raspberrypi.tail13f623.ts.net:9100",
"job": "node",
"role": "raspberrypi"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:36.961379652Z",
"lastScrapeDuration": 0.292301106,
"scrapeInterval": "15s",
"scrapePool": "node",
"scrapeTimeout": "10s",
"scrapeUrl": "http://raspberrypi.tail13f623.ts.net:9100/metrics"
},
{
"discoveredLabels": {
"__address__": "pihole-exporter:9617",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "pihole",
"role": "pihole"
},
"globalUrl": "http://pihole-exporter:9617/metrics",
"health": "up",
"labels": {
"instance": "pihole-exporter:9617",
"job": "pihole",
"role": "pihole"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:43.038435285Z",
"lastScrapeDuration": 0.49729699,
"scrapeInterval": "15s",
"scrapePool": "pihole",
"scrapeTimeout": "10s",
"scrapeUrl": "http://pihole-exporter:9617/metrics"
},
{
"discoveredLabels": {
"__address__": "prometheus:9090",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "prometheus",
"role": "prometheus"
},
"globalUrl": "http://prometheus:9090/metrics",
"health": "up",
"labels": {
"instance": "prometheus:9090",
"job": "prometheus",
"role": "prometheus"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:39.065876441Z",
"lastScrapeDuration": 0.004656092,
"scrapeInterval": "15s",
"scrapePool": "prometheus",
"scrapeTimeout": "10s",
"scrapeUrl": "http://prometheus:9090/metrics"
},
{
"discoveredLabels": {
"__address__": "pve.sweet.home:9101",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "proxmox-storage",
"role": "proxmox",
"storage": "lvm"
},
"globalUrl": "http://pve.sweet.home:9101/metrics",
"health": "up",
"labels": {
"instance": "pve.sweet.home:9101",
"job": "proxmox-storage",
"role": "proxmox",
"storage": "lvm"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:44.889795345Z",
"lastScrapeDuration": 0.003225566,
"scrapeInterval": "15s",
"scrapePool": "proxmox-storage",
"scrapeTimeout": "10s",
"scrapeUrl": "http://pve.sweet.home:9101/metrics"
},
{
"discoveredLabels": {
"__address__": "pbs.sweet.home:9102",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "proxmox-storage",
"role": "backup",
"storage": "datastore"
},
"globalUrl": "http://pbs.sweet.home:9102/metrics",
"health": "up",
"labels": {
"instance": "pbs.sweet.home:9102",
"job": "proxmox-storage",
"role": "backup",
"storage": "datastore"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:32.748484232Z",
"lastScrapeDuration": 0.002049134,
"scrapeInterval": "15s",
"scrapePool": "proxmox-storage",
"scrapeTimeout": "10s",
"scrapeUrl": "http://pbs.sweet.home:9102/metrics"
},
{
"discoveredLabels": {
"__address__": "raspberrypi.tail13f623.ts.net:9273",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "telegraf",
"role": "docker"
},
"globalUrl": "http://raspberrypi.tail13f623.ts.net:9273/metrics",
"health": "up",
"labels": {
"instance": "raspberrypi.tail13f623.ts.net:9273",
"job": "telegraf",
"role": "docker"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:36.677793239Z",
"lastScrapeDuration": 0.151265608,
"scrapeInterval": "15s",
"scrapePool": "telegraf",
"scrapeTimeout": "10s",
"scrapeUrl": "http://raspberrypi.tail13f623.ts.net:9273/metrics"
},
{
"discoveredLabels": {
"__address__": "telegraf:9273",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "telegraf",
"role": "docker"
},
"globalUrl": "http://telegraf:9273/metrics",
"health": "up",
"labels": {
"instance": "telegraf:9273",
"job": "telegraf",
"role": "docker"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:44.055413407Z",
"lastScrapeDuration": 0.152691132,
"scrapeInterval": "15s",
"scrapePool": "telegraf",
"scrapeTimeout": "10s",
"scrapeUrl": "http://telegraf:9273/metrics"
},
{
"discoveredLabels": {
"__address__": "traefik.lan.ddnsgeek.com:8080",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "traefik",
"role": "docker"
},
"globalUrl": "http://traefik.lan.ddnsgeek.com:8080/metrics",
"health": "up",
"labels": {
"instance": "traefik.lan.ddnsgeek.com:8080",
"job": "traefik",
"role": "docker"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:34.549111604Z",
"lastScrapeDuration": 0.004579596,
"scrapeInterval": "15s",
"scrapePool": "traefik",
"scrapeTimeout": "10s",
"scrapeUrl": "http://traefik.lan.ddnsgeek.com:8080/metrics"
},
{
"discoveredLabels": {
"__address__": "raspberrypi.tail13f623.ts.net:8080",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"job": "traefik",
"role": "raspberrypi"
},
"globalUrl": "http://raspberrypi.tail13f623.ts.net:8080/metrics",
"health": "up",
"labels": {
"instance": "raspberrypi.tail13f623.ts.net:8080",
"job": "traefik",
"role": "raspberrypi"
},
"lastError": "",
"lastScrape": "2026-04-13T06:36:35.550082255Z",
"lastScrapeDuration": 0.062750046,
"scrapeInterval": "15s",
"scrapePool": "traefik",
"scrapeTimeout": "10s",
"scrapeUrl": "http://raspberrypi.tail13f623.ts.net:8080/metrics"
}
],
"droppedTargetCounts": {
"container-updates": 0,
"kuma": 0,
"node": 0,
"pihole": 0,
"prometheus": 0,
"proxmox-storage": 0,
"telegraf": 0,
"traefik": 0
},
"droppedTargets": []
},
"status": "success"
}
-475
View File
@@ -1,475 +0,0 @@
#!/usr/bin/env python3
"""Export Prometheus runtime inventory for documentation/diagram workflows."""
from __future__ import annotations
import argparse
import json
import os
import ssl
import sys
from collections import Counter, defaultdict
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
from urllib import error, parse, request
# Default per-request HTTP timeout (seconds) when neither --timeout nor
# PROMETHEUS_TIMEOUT overrides it.
DEFAULT_TIMEOUT = 10.0
class ExportError(RuntimeError):
    """Raised for expected hard failures in export flow (caught in main)."""
@dataclass
class Config:
    """Resolved runtime configuration merged from CLI flags and environment."""

    prometheus_url: str  # base URL with any trailing slash stripped
    output_dir: Path  # directory receiving JSON artifacts
    timeout: float  # per-request HTTP timeout in seconds
    verify_tls: bool  # False disables certificate checks (https URLs only)
    bearer_token: str | None  # PROMETHEUS_BEARER_TOKEN; takes precedence over basic auth
    username: str | None  # PROMETHEUS_USERNAME (basic auth)
    password: str | None  # PROMETHEUS_PASSWORD (basic auth)
    verbose: bool  # emit progress messages to stderr
def parse_bool(value: str | None, default: bool = True) -> bool:
    """Interpret a truthy/falsy string; ``None`` yields ``default``.

    Accepts 1/true/yes/on and 0/false/no/off (case- and whitespace-
    insensitive) and raises ExportError for anything else.
    """
    if value is None:
        return default
    token = value.strip().lower()
    if token in {"1", "true", "yes", "on"}:
        return True
    if token in {"0", "false", "no", "off"}:
        return False
    raise ExportError(f"Invalid boolean value: {value!r}")
def parse_args(argv: list[str]) -> argparse.Namespace:
    """Parse exporter CLI options from ``argv`` (program name excluded)."""
    parser = argparse.ArgumentParser(description=__doc__)
    # Flag specs as data keeps each option on one visual row.
    flag_specs = [
        ("--output-dir", {"default": "docs/runtime", "help": "Directory for JSON artifacts"}),
        ("--prometheus-url", {"help": "Prometheus base URL"}),
        ("--timeout", {"type": float, "help": "HTTP timeout in seconds"}),
        ("--insecure", {"action": "store_true", "help": "Disable TLS verification"}),
        ("--verbose", {"action": "store_true", "help": "Print progress details"}),
    ]
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args(argv)
def load_config(args: argparse.Namespace) -> Config:
    """Merge CLI arguments with PROMETHEUS_* environment variables into a Config.

    CLI flags take precedence over environment variables. Raises ExportError
    when no Prometheus URL is supplied or when PROMETHEUS_TIMEOUT is not a
    valid number (previously that leaked a bare ValueError traceback).
    """
    prometheus_url = args.prometheus_url or os.environ.get("PROMETHEUS_URL")
    if not prometheus_url:
        raise ExportError(
            "PROMETHEUS_URL is required. Set PROMETHEUS_URL or pass --prometheus-url."
        )
    timeout_value = args.timeout
    if timeout_value is None:
        timeout_raw = os.environ.get("PROMETHEUS_TIMEOUT")
        if timeout_raw:
            try:
                timeout_value = float(timeout_raw)
            except ValueError as exc:
                # Surface a readable configuration error instead of a traceback.
                raise ExportError(
                    f"Invalid PROMETHEUS_TIMEOUT value: {timeout_raw!r}"
                ) from exc
        else:
            timeout_value = DEFAULT_TIMEOUT
    verify_tls = parse_bool(os.environ.get("PROMETHEUS_VERIFY_TLS"), default=True)
    if args.insecure:
        verify_tls = False
    return Config(
        prometheus_url=prometheus_url.rstrip("/"),
        output_dir=Path(args.output_dir),
        timeout=timeout_value,
        verify_tls=verify_tls,
        bearer_token=os.environ.get("PROMETHEUS_BEARER_TOKEN"),
        username=os.environ.get("PROMETHEUS_USERNAME"),
        password=os.environ.get("PROMETHEUS_PASSWORD"),
        verbose=args.verbose,
    )
def build_opener(config: Config) -> request.OpenerDirector:
    """Create a urllib opener with optional basic auth and TLS relaxation.

    Basic-auth credentials are only installed when no bearer token is set
    (the bearer token, if any, is attached per request in api_get_json).
    TLS verification is only disabled for https URLs when verify_tls is False;
    otherwise the default verifying HTTPS handler is used.
    """
    handlers: list[Any] = []
    if config.username and config.password and not config.bearer_token:
        password_manager = request.HTTPPasswordMgrWithDefaultRealm()
        # None realm: credentials apply to any realm at this base URL.
        password_manager.add_password(None, config.prometheus_url, config.username, config.password)
        handlers.append(request.HTTPBasicAuthHandler(password_manager))
    if config.prometheus_url.startswith("https://") and not config.verify_tls:
        insecure_context = ssl.create_default_context()
        insecure_context.check_hostname = False
        insecure_context.verify_mode = ssl.CERT_NONE
        handlers.append(request.HTTPSHandler(context=insecure_context))
    return request.build_opener(*handlers)
def api_get_json(
    opener: request.OpenerDirector,
    config: Config,
    endpoint: str,
    params: dict[str, str] | None = None,
) -> dict[str, Any]:
    """GET a Prometheus API endpoint and return its decoded JSON payload.

    Raises ExportError for HTTP errors, connectivity failures, timeouts,
    malformed JSON, or a non-"success" Prometheus status field.
    """
    url = f"{config.prometheus_url}{endpoint}"
    if params:
        url = f"{url}?{parse.urlencode(params)}"
    req = request.Request(url)
    req.add_header("Accept", "application/json")
    # Bearer auth is attached per request; basic auth (if any) lives in the opener.
    if config.bearer_token:
        req.add_header("Authorization", f"Bearer {config.bearer_token}")
    try:
        with opener.open(req, timeout=config.timeout) as response:
            body = response.read().decode("utf-8")
    except error.HTTPError as exc:
        detail = exc.read().decode("utf-8", errors="replace")
        raise ExportError(f"Prometheus API error for {endpoint}: HTTP {exc.code} - {detail}") from exc
    except error.URLError as exc:
        raise ExportError(
            f"Failed to reach Prometheus at {config.prometheus_url}: {exc.reason}"
        ) from exc
    except TimeoutError as exc:
        raise ExportError(
            f"Timed out reaching Prometheus at {config.prometheus_url} after {config.timeout}s"
        ) from exc
    try:
        payload = json.loads(body)
    except json.JSONDecodeError as exc:
        raise ExportError(f"Invalid JSON returned by {endpoint}") from exc
    # Prometheus wraps results as {"status": ..., "data": ...}; anything but
    # "success" is treated as a hard failure.
    if payload.get("status") != "success":
        raise ExportError(f"Prometheus API returned non-success status for {endpoint}: {payload}")
    return payload
def write_json(path: Path, payload: dict[str, Any]) -> None:
    """Write ``payload`` as pretty-printed, key-sorted JSON with a trailing newline."""
    path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(payload, indent=2, sort_keys=True) + "\n"
    path.write_text(serialized, encoding="utf-8")
def extract_vector(query_response: dict[str, Any]) -> list[dict[str, Any]]:
    """Pull the raw sample entries out of an instant-vector query response.

    Any response that is not of resultType "vector" yields an empty list.
    """
    data = query_response.get("data", {})
    return data.get("result", []) if data.get("resultType") == "vector" else []
def counter_from_vector(results: list[dict[str, Any]], label: str) -> dict[str, int]:
    """Collapse vector samples into {label_value: int(sample)}, key-sorted.

    A sample missing the label is bucketed under "<missing>"; a sample whose
    value cannot be parsed as a number counts as 0. Later samples for the
    same label value overwrite earlier ones.
    """
    counts: dict[str, int] = {}
    for sample in results:
        bucket = sample.get("metric", {}).get(label, "<missing>")
        raw = sample.get("value", [None, "0"])[1]
        try:
            parsed = int(float(raw))
        except (TypeError, ValueError):
            parsed = 0
        counts[bucket] = parsed
    return {key: counts[key] for key in sorted(counts)}
def nested_counter_from_vector(
    results: list[dict[str, Any]],
    parent_label: str,
    child_label: str,
) -> dict[str, dict[str, int]]:
    """Group vector samples into {parent: {child: int(sample)}}, key-sorted at both levels.

    Missing labels map to "<missing>"; unparseable sample values count as 0.
    """
    grouped: dict[str, dict[str, int]] = {}
    for sample in results:
        metric = sample.get("metric", {})
        parent_key = metric.get(parent_label, "<missing>")
        child_key = metric.get(child_label, "<missing>")
        raw = sample.get("value", [None, "0"])[1]
        try:
            count = int(float(raw))
        except (TypeError, ValueError):
            count = 0
        grouped.setdefault(parent_key, {})[child_key] = count
    return {
        parent: {child: children[child] for child in sorted(children)}
        for parent, children in sorted(grouped.items())
    }
def summarize_targets(targets_payload: dict[str, Any]) -> tuple[list[dict[str, Any]], list[dict[str, Any]], Counter]:
    """Normalize the Prometheus /api/v1/targets payload.

    Returns (all targets, unhealthy subset, Counter of absent metadata
    labels). Targets are ordered by (job label, scrape URL); a target is
    unhealthy whenever its health field is anything other than "up".
    """
    active = targets_payload.get("data", {}).get("activeTargets", [])
    wanted_labels = ["hostname", "service", "role", "hypervisor", "network", "exposure"]
    ordered = sorted(
        active,
        key=lambda t: (t.get("labels", {}).get("job", ""), t.get("scrapeUrl", "")),
    )
    all_targets: list[dict[str, Any]] = []
    failing: list[dict[str, Any]] = []
    absent = Counter()
    for target in ordered:
        labels = target.get("labels", {})
        discovered = target.get("discoveredLabels", {})
        # Final labels win over discovered labels on conflict.
        combined = {**discovered, **labels}
        absent.update(name for name in wanted_labels if name not in combined)
        entry = {
            "job": labels.get("job", "<missing>"),
            "instance": labels.get("instance", "<missing>"),
            "health": target.get("health", "unknown"),
            "scrape_pool": target.get("scrapePool"),
            "scrape_url": target.get("scrapeUrl"),
            "last_error": target.get("lastError") or "",
            "labels": labels,
            "discovered_labels": discovered,
        }
        all_targets.append(entry)
        if target.get("health") != "up":
            failing.append(entry)
    return all_targets, failing, absent
def build_inventory(
    config: Config,
    targets_payload: dict[str, Any],
    query_payloads: dict[str, dict[str, Any]],
) -> dict[str, Any]:
    """Merge target and query payloads into one normalized inventory document.

    Expects query_payloads to contain the keys "up", "jobs", "job_instance",
    "service", "role", "hypervisor", "network", and "exposure" (raises
    KeyError otherwise).
    """
    # UTC timestamp without microseconds, rendered with a trailing "Z".
    now = datetime.now(timezone.utc).replace(microsecond=0).isoformat().replace("+00:00", "Z")
    targets, unhealthy_targets, missing_labels = summarize_targets(targets_payload)
    up_results = extract_vector(query_payloads["up"])
    jobs_results = extract_vector(query_payloads["jobs"])
    job_instance_results = extract_vector(query_payloads["job_instance"])
    # Optional label dimensions; empty dicts when the labels are not in use.
    optional = {
        "services": counter_from_vector(extract_vector(query_payloads["service"]), "service"),
        "roles": counter_from_vector(extract_vector(query_payloads["role"]), "role"),
        "hypervisors": counter_from_vector(extract_vector(query_payloads["hypervisor"]), "hypervisor"),
        "networks": counter_from_vector(extract_vector(query_payloads["network"]), "network"),
        "exposures": counter_from_vector(extract_vector(query_payloads["exposure"]), "exposure"),
    }
    unknowns = {
        "missing_label_counts": dict(sorted(missing_labels.items(), key=lambda kv: kv[0])),
        "notes": [
            "Prometheus runtime data is observational and not authoritative for placement/topology.",
            "Do not infer Proxmox host placement, VM placement, VLAN layout, or public/internal boundaries without explicit labels or additional inventory sources.",
        ],
    }
    return {
        "generated_at": now,
        "prometheus_url": config.prometheus_url,
        "targets": targets,
        "jobs": counter_from_vector(jobs_results, "job"),
        "instances": nested_counter_from_vector(job_instance_results, "job", "instance"),
        "services": optional["services"],
        "roles": optional["roles"],
        "hypervisors": optional["hypervisors"],
        "networks": optional["networks"],
        "exposures": optional["exposures"],
        "unhealthy_targets": unhealthy_targets,
        "unknowns": unknowns,
        "notes": [
            "The `up` query indicates scrape success from Prometheus perspective only.",
            "Use static repository architecture docs and deployment configs with this runtime export for complete diagrams.",
        ],
        "query_observations": {
            "up_series_count": len(up_results),
            "job_count": len(counter_from_vector(jobs_results, "job")),
        },
    }
def markdown_table(headers: list[str], rows: list[list[str]]) -> str:
    """Render a pipe-delimited markdown table from header and row cells."""
    def fmt(cells: list[str]) -> str:
        return "| " + " | ".join(cells) + " |"
    rendered = [fmt(headers), fmt(["---"] * len(headers))]
    rendered.extend(fmt(row) for row in rows)
    return "\n".join(rendered)
def generate_markdown(inventory: dict[str, Any]) -> str:
    """Render the normalized inventory dict into the full markdown report.

    Each table's row list falls back to a placeholder row (e.g. "<none>") so
    no section ever renders an empty table.
    """
    jobs_rows = [[job, str(count)] for job, count in inventory["jobs"].items()] or [["<none>", "0"]]
    targets_rows = [
        [target["job"], target["instance"], target["health"], target.get("scrape_url") or ""]
        for target in inventory["targets"]
    ] or [["<none>", "<none>", "unknown", ""]]
    unhealthy_rows = [
        [target["job"], target["instance"], target["health"], target.get("last_error", "")]
        for target in inventory["unhealthy_targets"]
    ] or [["None", "", "", ""]]
    host_rows: list[list[str]] = []
    for job, instances in inventory["instances"].items():
        for instance, count in instances.items():
            host_rows.append([job, instance, str(count)])
    if not host_rows:
        host_rows = [["<none>", "<none>", "0"]]
    service_rows = [[name, str(value)] for name, value in inventory["services"].items()] or [["<none>", "0"]]
    network_rows: list[list[str]] = []
    # Flatten the four metadata dimensions into (category, label, count) rows;
    # section[:-1] strips the plural "s" for the category column.
    for section in ["networks", "exposures", "roles", "hypervisors"]:
        for name, value in inventory[section].items():
            network_rows.append([section[:-1], name, str(value)])
    if not network_rows:
        network_rows = [["<none>", "<none>", "0"]]
    unknown_rows = [
        [label, str(count)]
        for label, count in inventory["unknowns"].get("missing_label_counts", {}).items()
    ] or [["<none>", "0"]]
    # Assemble the document section by section, blank strings as blank lines.
    lines = [
        "# Prometheus Runtime Inventory",
        "",
        "## 1. Overview",
        "",
        f"- Generated at: `{inventory['generated_at']}`",
        f"- Prometheus URL: `{inventory['prometheus_url']}`",
        f"- Total active targets: `{len(inventory['targets'])}`",
        f"- Unhealthy targets: `{len(inventory['unhealthy_targets'])}`",
        "- Source type: Observed runtime telemetry (not sole source of truth).",
        "",
        "## 2. Scrape jobs",
        "",
        markdown_table(["Job", "Observed target count"], jobs_rows),
        "",
        "## 3. Active targets",
        "",
        markdown_table(["Job", "Instance", "Health", "Scrape URL"], targets_rows),
        "",
        "## 4. Unhealthy targets",
        "",
        markdown_table(["Job", "Instance", "Health", "Last error"], unhealthy_rows),
        "",
        "## 5. Hosts / instances observed",
        "",
        markdown_table(["Job", "Instance", "Series count"], host_rows),
        "",
        "## 6. Services observed",
        "",
        markdown_table(["Service", "Series count"], service_rows),
        "",
        "## 7. Network / exposure metadata observed",
        "",
        markdown_table(["Category", "Label", "Series count"], network_rows),
        "",
        "## 8. Unknowns / missing metadata",
        "",
        markdown_table(["Missing label", "Targets missing"], unknown_rows),
        "",
        "Notes:",
        "",
    ]
    for note in inventory["unknowns"].get("notes", []):
        lines.append(f"- {note}")
    lines.extend(
        [
            "",
            "## 9. Regeneration instructions",
            "",
            "```bash",
            "export PROMETHEUS_URL=\"https://prometheus.example.com\"",
            "# Optional auth:",
            "# export PROMETHEUS_BEARER_TOKEN=\"...\"",
            "# or export PROMETHEUS_USERNAME=\"...\"; export PROMETHEUS_PASSWORD=\"...\"",
            "python3 scripts/export_prometheus_inventory.py --output-dir docs/runtime",
            "```",
            "",
            "This inventory feeds documentation and diagram workflows as an observed-runtime input alongside static repo configuration.",
            "",
        ]
    )
    return "\n".join(lines)
def write_markdown(path: Path, text: str) -> None:
    """Persist rendered markdown, creating any missing parent directories."""
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("w", encoding="utf-8") as handle:
        handle.write(text)
def log(config: Config, message: str) -> None:
    """Write a progress message to stderr; silent unless verbose mode is on."""
    if not config.verbose:
        return
    print(message, file=sys.stderr)
def main(argv: list[str]) -> int:
    """Run the full export: fetch targets + queries, write JSON and markdown.

    Returns 0 on success, 1 on any ExportError (printed to stderr).
    """
    try:
        args = parse_args(argv)
        config = load_config(args)
        opener = build_opener(config)
        output_dir = config.output_dir
        output_dir.mkdir(parents=True, exist_ok=True)
        # name -> (API endpoint, query params or None, artifact filename).
        endpoint_map = {
            "targets": ("/api/v1/targets", None, "prometheus-targets.json"),
            "up": ("/api/v1/query", {"query": "up"}, "prometheus-query-up.json"),
            "jobs": (
                "/api/v1/query",
                {"query": "count by (job) (up)"},
                "prometheus-query-jobs.json",
            ),
            "job_instance": (
                "/api/v1/query",
                {"query": "count by (job, instance) (up)"},
                "prometheus-query-job-instance.json",
            ),
            "service": (
                "/api/v1/query",
                {"query": "count by (service) (up)"},
                "prometheus-query-service.json",
            ),
            "role": (
                "/api/v1/query",
                {"query": "count by (role) (up)"},
                "prometheus-query-role.json",
            ),
            "hypervisor": (
                "/api/v1/query",
                {"query": "count by (hypervisor) (up)"},
                "prometheus-query-hypervisor.json",
            ),
            "network": (
                "/api/v1/query",
                {"query": "count by (network) (up)"},
                "prometheus-query-network.json",
            ),
            "exposure": (
                "/api/v1/query",
                {"query": "count by (exposure) (up)"},
                "prometheus-query-exposure.json",
            ),
        }
        # Fetch every endpoint, writing each raw payload as its own artifact.
        payloads: dict[str, dict[str, Any]] = {}
        for name, (endpoint, params, filename) in endpoint_map.items():
            log(config, f"Querying {name}: {endpoint}")
            payload = api_get_json(opener, config, endpoint, params=params)
            payloads[name] = payload
            write_json(output_dir / filename, payload)
        inventory = build_inventory(config, payloads["targets"], payloads)
        write_json(output_dir / "prometheus-inventory.json", inventory)
        markdown = generate_markdown(inventory)
        # Markdown report lands one level above the JSON artifacts directory.
        markdown_path = output_dir.parent / "prometheus-inventory.md"
        write_markdown(markdown_path, markdown)
        log(config, f"Wrote inventory artifacts to {output_dir} and {markdown_path}")
        return 0
    except ExportError as exc:
        print(f"ERROR: {exc}", file=sys.stderr)
        return 1
# CLI entry point: forward argv (minus the program name) and propagate the
# integer exit status.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
-427
View File
@@ -1,427 +0,0 @@
#!/usr/bin/env python3
"""Render Prometheus inventory into documentation and Mermaid diagrams."""
from __future__ import annotations
import argparse
import json
import re
from collections import defaultdict
from pathlib import Path
from typing import Any
from urllib.parse import urlparse
# Sentinel markers delimiting the auto-generated section inside managed
# markdown files; content between them is replaced on each render.
GENERATED_BEGIN = "<!-- BEGIN GENERATED PROMETHEUS SECTION -->"
GENERATED_END = "<!-- END GENERATED PROMETHEUS SECTION -->"
def parse_args(argv: list[str] | None = None) -> argparse.Namespace:
    """Parse renderer CLI options.

    Args:
        argv: Optional argument list without the program name. Defaults to
            None, which makes argparse read sys.argv[1:] exactly as before;
            passing an explicit list makes the function testable and matches
            the exporter script's parse_args signature.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--inventory-file",
        default="docs/runtime/prometheus-inventory.json",
        help="Path to normalized Prometheus inventory JSON.",
    )
    parser.add_argument("--docs-dir", default="docs", help="Documentation directory.")
    parser.add_argument("--diagrams-dir", default="docs/diagrams", help="Diagram output directory.")
    parser.add_argument("--readme-file", default="README.md", help="README path for regeneration notes.")
    parser.add_argument("--architecture-file", default="docs/architecture.md", help="Architecture markdown path.")
    parser.add_argument("--network-file", default="docs/network.md", help="Network markdown path.")
    parser.add_argument("--coverage-file", default="docs/monitoring-coverage.md", help="Coverage markdown path.")
    parser.add_argument("--dry-run", action="store_true", help="Print changes instead of writing files.")
    parser.add_argument("--verbose", action="store_true", help="Print detailed processing output.")
    return parser.parse_args(argv)
def load_json(path: Path) -> dict[str, Any]:
    """Read a JSON document from ``path`` and require it to be an object."""
    data = json.loads(path.read_text(encoding="utf-8"))
    if not isinstance(data, dict):
        raise ValueError(f"Inventory must be a JSON object: {path}")
    return data
def merged_labels(target: dict[str, Any]) -> dict[str, str]:
    """Combine discovered and final labels (stringified); final labels win."""
    combined: dict[str, str] = {}
    for source in (target.get("discovered_labels") or {}, target.get("labels") or {}):
        for key, value in source.items():
            combined[key] = str(value)
    return combined
def normalize_targets(inventory: dict[str, Any]) -> list[dict[str, Any]]:
    """Flatten inventory targets into uniform dicts for rendering.

    Host and endpoint are derived from the scrape URL; metadata labels
    (service/role/hypervisor/network/exposure) default to "unknown" when
    absent. Output is sorted by (job, instance, scrape_url).
    """
    normalized: list[dict[str, Any]] = []
    for target in inventory.get("targets") or []:
        # Skip malformed entries rather than failing the whole render.
        if not isinstance(target, dict):
            continue
        labels = merged_labels(target)
        parsed = urlparse(str(target.get("scrape_url") or ""))
        # Fall back to a hostname label, then "unknown", when the URL has no host.
        host = parsed.hostname or labels.get("hostname") or "unknown"
        endpoint = parsed.path or "/metrics"
        normalized.append(
            {
                "job": str(target.get("job") or labels.get("job") or "<missing>"),
                "instance": str(target.get("instance") or labels.get("instance") or "<missing>"),
                "health": str(target.get("health") or "unknown"),
                "scrape_url": str(target.get("scrape_url") or ""),
                "last_error": str(target.get("last_error") or ""),
                "host": host,
                "endpoint": endpoint,
                "service": labels.get("service", "unknown"),
                "role": labels.get("role", "unknown"),
                "hypervisor": labels.get("hypervisor", "unknown"),
                "network": labels.get("network", "unknown"),
                "exposure": labels.get("exposure", "unknown"),
            }
        )
    normalized.sort(key=lambda t: (t["job"], t["instance"], t["scrape_url"]))
    return normalized
def markdown_table(headers: list[str], rows: list[list[str]]) -> str:
    """Render a GitHub-flavored markdown table as a single string."""
    def render_row(cells: list[str]) -> str:
        # Pipe-delimited row with a space of padding on each side.
        return "| " + " | ".join(cells) + " |"

    rendered = [render_row(headers), render_row(["---"] * len(headers))]
    rendered.extend(render_row(row) for row in rows)
    return "\n".join(rendered)
def summarize_targets(targets: list[dict[str, Any]], unhealthy: list[dict[str, Any]]) -> dict[str, Any]:
    """Aggregate normalized targets into per-job/instance/service/exposure views.

    A target counts as unhealthy when it appears in *unhealthy* (matched on
    job/instance/scrape_url) or its own health field is not "up". Sets in
    the accumulators are converted to sorted lists in the returned dict,
    and every grouping is key-sorted for stable output.
    """
    job_stats: dict[str, dict[str, int]] = defaultdict(lambda: {"active": 0, "unhealthy": 0})
    instance_stats: dict[str, dict[str, Any]] = defaultdict(lambda: {"jobs": set(), "unhealthy": 0, "total": 0})
    service_stats: dict[str, dict[str, Any]] = defaultdict(lambda: {"instances": set(), "unhealthy": 0, "total": 0})
    exposure_counts: dict[str, int] = defaultdict(int)
    flagged = {(entry["job"], entry["instance"], entry["scrape_url"]) for entry in unhealthy}
    for target in targets:
        job = target["job"]
        instance = target["instance"]
        service = "unknown" if target["service"] == "<missing>" else target["service"]
        job_stats[job]["active"] += 1
        is_down = (job, instance, target["scrape_url"]) in flagged or target["health"] != "up"
        if is_down:
            job_stats[job]["unhealthy"] += 1
            instance_stats[instance]["unhealthy"] += 1
            service_stats[service]["unhealthy"] += 1
        instance_stats[instance]["jobs"].add(job)
        instance_stats[instance]["total"] += 1
        service_stats[service]["instances"].add(instance)
        service_stats[service]["total"] += 1
        exposure_counts[target["exposure"]] += 1
    return {
        "by_job": dict(sorted(job_stats.items())),
        "by_instance": {
            name: {
                "jobs": sorted(stats["jobs"]),
                "unhealthy": stats["unhealthy"],
                "total": stats["total"],
            }
            for name, stats in sorted(instance_stats.items())
        },
        "by_service": {
            name: {
                "instances": sorted(stats["instances"]),
                "unhealthy": stats["unhealthy"],
                "total": stats["total"],
            }
            for name, stats in sorted(service_stats.items())
        },
        "by_exposure": dict(sorted(exposure_counts.items())),
    }
def render_monitoring_coverage(inventory: dict[str, Any], targets: list[dict[str, Any]]) -> str:
    """Render the full markdown body for the monitoring-coverage page.

    Builds summary tables (per job, instance, and service), an
    unhealthy-target table, missing-label counts, and regeneration
    instructions from the normalized inventory.
    """
    # Unhealthy targets run through the same normalization so summaries can
    # match them against active targets by (job, instance, scrape_url).
    unhealthy = normalize_targets({"targets": inventory.get("unhealthy_targets") or []})
    summaries = summarize_targets(targets, unhealthy)
    missing = (inventory.get("unknowns") or {}).get("missing_label_counts") or {}
    lines = [
        "# Monitoring Coverage",
        "",
        "## Overview",
        "",
        "This page is generated from Prometheus-observed runtime inventory. It supplements declared architecture docs and does not replace static source-of-truth configuration.",
        "",
        f"- Inventory timestamp: `{inventory.get('generated_at', 'unknown')}`",
        f"- Prometheus URL: `{inventory.get('prometheus_url', 'unknown')}`",
        f"- Active scrape targets observed: `{len(targets)}`",
        f"- Unhealthy scrape targets observed: `{len(unhealthy)}`",
        "",
        "## Coverage by job",
        "",
    ]
    # Each "or [[...]]" fallback below keeps its table renderable (a single
    # placeholder row) when no matching targets were observed.
    job_rows = [
        [job, str(data["active"]), str(data["unhealthy"])]
        for job, data in summaries["by_job"].items()
    ] or [["none", "0", "0"]]
    lines.append(markdown_table(["job", "active targets", "unhealthy targets"], job_rows))
    lines.extend(["", "## Coverage by instance", ""])
    instance_rows = [
        [instance, ", ".join(data["jobs"]), f"{data['total'] - data['unhealthy']}/{data['total']} up"]
        for instance, data in summaries["by_instance"].items()
    ] or [["none", "", ""]]
    lines.append(markdown_table(["instance", "jobs", "health"], instance_rows))
    lines.extend(["", "## Coverage by service", ""])
    service_rows = [
        [service, ", ".join(data["instances"]), f"{data['total'] - data['unhealthy']}/{data['total']} up"]
        for service, data in summaries["by_service"].items()
    ] or [["unknown", "", ""]]
    lines.append(markdown_table(["service", "instances", "health"], service_rows))
    lines.extend(["", "## Unhealthy targets", ""])
    unhealthy_rows = [
        [u["job"], u["instance"], u["scrape_url"], u["health"], u["last_error"] or "none"]
        for u in unhealthy
    ] or [["none", "", "", "", ""]]
    lines.append(markdown_table(["job", "instance", "scrape URL", "health", "last error"], unhealthy_rows))
    lines.extend(["", "## Unknowns / missing metadata", ""])
    missing_rows = [[k, str(v)] for k, v in sorted(missing.items())] or [["none", "0"]]
    lines.append(markdown_table(["label", "targets missing"], missing_rows))
    lines.extend(
        [
            "",
            "Unknown or missing metadata is treated as `unknown` in generated summaries to avoid over-claiming topology.",
            "",
            "## Regeneration instructions",
            "",
            "```bash",
            "python3 scripts/render_prometheus_docs.py --inventory-file docs/runtime/prometheus-inventory.json",
            "```",
            "",
        ]
    )
    return "\n".join(lines)
def render_network_doc(inventory: dict[str, Any], targets: list[dict[str, Any]]) -> str:
    """Render the observed network/exposure markdown document.

    Tables cover every scrape endpoint, label-derived exposure counts,
    and the set of distinct metrics paths seen across targets. All
    network/exposure values come from inventory labels, not probing.
    """
    summaries = summarize_targets(targets, normalize_targets({"targets": inventory.get("unhealthy_targets") or []}))
    endpoint_rows = [[t["job"], t["instance"], t["scrape_url"], t["network"], t["exposure"]] for t in targets]
    # Placeholder rows keep each table renderable when nothing was observed.
    endpoint_rows = endpoint_rows or [["none", "", "", "", ""]]
    exposure_rows = [[exp, str(count)] for exp, count in summaries["by_exposure"].items()] or [["unknown", "0"]]
    # Count how many targets expose each distinct metrics path.
    paths = sorted({t["endpoint"] for t in targets})
    path_rows = [[path, str(sum(1 for t in targets if t["endpoint"] == path))] for path in paths] or [["/metrics", "0"]]
    lines = [
        "# Network and Exposure View (Prometheus Observed)",
        "",
        "## Overview",
        "",
        "This document is generated from Prometheus scrape metadata and endpoint URLs. It is an observed monitoring view and not a physical network map.",
        "",
        f"- Inventory timestamp: `{inventory.get('generated_at', 'unknown')}`",
        "- Physical topology, VLAN mapping, and bridge membership remain unknown unless explicitly documented elsewhere.",
        "",
        "## Observed scrape endpoints",
        "",
        markdown_table(["job", "instance", "scrape URL", "network label", "exposure label"], endpoint_rows),
        "",
        "## Internal vs public indicators",
        "",
        markdown_table(["exposure label", "targets"], exposure_rows),
        "",
        "All indicators above are label-derived. Missing labels are rendered as `unknown`.",
        "",
        "## Monitoring paths",
        "",
        markdown_table(["metrics path", "observed targets"], path_rows),
        "",
        "## Unknowns and limits",
        "",
        "- Prometheus can confirm scrape reachability but not ownership or placement boundaries.",
        "- No VLAN, switch, or hypervisor placement is inferred unless present in inventory labels.",
        "- Treat this as runtime evidence to pair with declared architecture docs.",
        "",
    ]
    return "\n".join(lines)
def render_architecture_section(inventory: dict[str, Any], targets: list[dict[str, Any]]) -> str:
    """Render the generated "Runtime visibility" section for the architecture doc.

    The body is wrapped in the GENERATED_BEGIN/GENERATED_END markers so it
    can be replaced idempotently on subsequent runs. Inventory notes, when
    present, are appended as a bulleted subsection.
    """
    summaries = summarize_targets(targets, normalize_targets({"targets": inventory.get("unhealthy_targets") or []}))
    notes = inventory.get("notes") or []
    lines = [
        "## Runtime visibility from Prometheus",
        "",
        GENERATED_BEGIN,
        "",
        "Prometheus inventory provides **observed runtime coverage** of scrape targets. It complements (but does not replace) declared architecture in Compose files and static docs.",
        "",
        f"- Inventory timestamp: `{inventory.get('generated_at', 'unknown')}`",
        f"- Observed jobs: `{len(summaries['by_job'])}`",
        f"- Observed instances: `{len(summaries['by_instance'])}`",
        f"- Observed services (label-derived): `{len(summaries['by_service'])}`",
        "",
        "### Observed monitoring view",
        "",
        markdown_table(
            ["job", "targets", "unhealthy"],
            [[job, str(data["active"]), str(data["unhealthy"])] for job, data in summaries["by_job"].items()] or [["none", "0", "0"]],
        ),
        "",
        "### Data sources",
        "",
        "- `docs/runtime/prometheus-inventory.json` (normalized runtime export)",
        "- Prometheus scrape metadata (`targets` + label sets)",
        "- Existing repository architecture docs for declared topology",
    ]
    if notes:
        lines.extend(["", "### Notes from inventory", ""])
        for note in notes:
            lines.append(f"- {note}")
    # Close the marker pair; the trailing "" yields a final newline on join.
    lines.extend(["", GENERATED_END, ""])
    return "\n".join(lines)
def upsert_generated_section(path: Path, section_markdown: str, dry_run: bool, verbose: bool) -> None:
    """Insert or refresh the generated section in a markdown document.

    When the GENERATED_BEGIN/GENERATED_END markers already exist in the
    file, only the marked span is replaced (the section's top-level
    heading line is dropped from the replacement so it is not duplicated
    outside the markers). Otherwise the whole section is appended to the
    end of the file.
    """
    existing = path.read_text(encoding="utf-8") if path.exists() else ""
    section_body = section_markdown
    if GENERATED_BEGIN in existing and GENERATED_END in existing:
        pattern = re.compile(
            rf"{re.escape(GENERATED_BEGIN)}.*?{re.escape(GENERATED_END)}",
            re.DOTALL,
        )
        replacement = "\n".join(
            line for line in section_body.splitlines() if line.strip() not in {"## Runtime visibility from Prometheus"}
        )
        # Use a callable replacement so backslashes or "\g" sequences in the
        # generated markdown are inserted literally instead of being
        # interpreted as group references by re.sub() (which can raise
        # re.error or silently corrupt the output).
        updated = pattern.sub(lambda _match: replacement.strip(), existing)
    else:
        updated = existing.rstrip() + "\n\n" + section_body.strip() + "\n"
    write_file(path, updated, dry_run=dry_run, verbose=verbose)
def mermaid_safe_id(value: str) -> str:
    """Turn an arbitrary string into a mermaid-safe node identifier.

    Non-alphanumeric characters become underscores, runs of underscores
    collapse to one, and edge underscores are trimmed; empty results fall
    back to "unknown".
    """
    collapsed = re.sub(r"_+", "_", re.sub(r"[^a-zA-Z0-9_]", "_", value))
    return collapsed.strip("_") or "unknown"
def render_monitoring_mermaid(targets: list[dict[str, Any]]) -> str:
    """Render a mermaid flowchart of scrape targets grouped into host subgraphs.

    Prometheus appears as one node connected to every target via a dashed
    "scrape" edge; each target node receives the dashed classDef styling.
    """
    grouped: dict[str, list[dict[str, Any]]] = defaultdict(list)
    for target in targets:
        grouped[target["host"]].append(target)
    lines = [
        "flowchart LR",
        " Prom[Prometheus]",
        "",
        " classDef scrape stroke-dasharray: 5 5;",
    ]
    for host in sorted(grouped):
        host_id = mermaid_safe_id(f"host_{host}")
        lines.append(f' subgraph {host_id}["Host: {host}"]')
        for target in sorted(grouped[host], key=lambda t: (t["job"], t["instance"])):
            tid = mermaid_safe_id(f"{target['job']}_{target['instance']}")
            label = f"{target['job']}\\n{target['instance']}"
            lines.append(f' {tid}["{label}"]')
        lines.append(" end")
        lines.append("")
    # Edges and class assignments follow the original (unsorted) target order.
    for target in targets:
        tid = mermaid_safe_id(f"{target['job']}_{target['instance']}")
        lines.append(f" Prom -. scrape .-> {tid}")
        lines.append(f" class {tid} scrape;")
    return "\n".join(lines) + "\n"
def render_architecture_mermaid(targets: list[dict[str, Any]]) -> str:
    """Render a mermaid flowchart linking declared architecture to observed jobs."""
    lines = [
        "flowchart TB",
        " Declared[Declared architecture\\n(Compose + docs)]",
        " Runtime[Observed runtime\\n(Prometheus inventory)]",
        " Declared --> Runtime",
        "",
        ' subgraph Monitoring["Prometheus observed jobs"]',
    ]
    # One node per distinct job, sorted for deterministic output.
    for job in sorted({target["job"] for target in targets}):
        jid = mermaid_safe_id(f"job_{job}")
        lines.append(f' {jid}["{job}"]')
    lines.extend([" end", "", " Runtime --> Monitoring", ""])
    return "\n".join(lines)
def write_file(path: Path, content: str, dry_run: bool, verbose: bool) -> None:
    """Persist *content* at *path*, creating parent directories as needed.

    In dry-run mode nothing is written; a preview line is printed instead.
    Verbose mode logs each successful write.
    """
    if dry_run:
        print(f"[DRY RUN] Would write: {path}")
    else:
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(content, encoding="utf-8")
        if verbose:
            print(f"Wrote {path}")
def update_readme(path: Path, dry_run: bool, verbose: bool) -> None:
    """Append regeneration instructions to the README's inventory section.

    Idempotent: the snippet is inserted only when the marker heading is
    present and the render script is not already mentioned anywhere in
    the file. A missing README is silently ignored.
    """
    if not path.exists():
        return
    text = path.read_text(encoding="utf-8")
    marker = "## Prometheus Runtime Inventory Export"
    snippet = (
        "\nRegenerate derived docs/diagrams from inventory:\n\n"
        "```bash\n"
        "python3 scripts/render_prometheus_docs.py --inventory-file docs/runtime/prometheus-inventory.json\n"
        "```\n"
    )
    already_documented = "scripts/render_prometheus_docs.py" in text
    if marker in text and not already_documented:
        write_file(path, text.replace(marker, marker + snippet), dry_run=dry_run, verbose=verbose)
def main() -> int:
    """Load the inventory, render all derived docs and diagrams, and write them."""
    args = parse_args()
    docs_dir = Path(args.docs_dir)
    diagrams_dir = Path(args.diagrams_dir)
    inventory_path = Path(args.inventory_file)
    inventory = load_json(inventory_path)
    targets = normalize_targets(inventory)

    def resolve_doc_path(path: Path) -> Path:
        # Bare filenames land in the docs directory; absolute paths and
        # paths that already contain a directory are used as-is.
        if path.is_absolute() or len(path.parts) > 1:
            return path
        return docs_dir / path

    write_file(
        resolve_doc_path(Path(args.coverage_file)),
        render_monitoring_coverage(inventory, targets),
        args.dry_run,
        args.verbose,
    )
    write_file(
        resolve_doc_path(Path(args.network_file)),
        render_network_doc(inventory, targets),
        args.dry_run,
        args.verbose,
    )
    upsert_generated_section(
        resolve_doc_path(Path(args.architecture_file)),
        render_architecture_section(inventory, targets),
        args.dry_run,
        args.verbose,
    )
    write_file(diagrams_dir / "monitoring-coverage.mmd", render_monitoring_mermaid(targets), args.dry_run, args.verbose)
    write_file(diagrams_dir / "architecture.mmd", render_architecture_mermaid(targets), args.dry_run, args.verbose)
    update_readme(Path(args.readme_file), args.dry_run, args.verbose)
    if args.verbose:
        print(f"Processed {len(targets)} targets from {inventory_path}")
    return 0
if __name__ == "__main__":
    # Propagate main()'s integer return value as the process exit code.
    raise SystemExit(main())