feat: Initial code
All checks were successful
Check Code Quality / RuffCheck (push) Successful in 1m4s
Build Docker / BuildImage (push) Successful in 1m26s

This commit is contained in:
2026-03-26 23:07:05 +11:00
parent d8ede00901
commit 0ce79935d7
24 changed files with 2527 additions and 143 deletions

3
collectors/__init__.py Normal file
View File

@@ -0,0 +1,3 @@
from .orchestrator import InventoryCollectorOrchestrator
__all__ = ["InventoryCollectorOrchestrator"]

17
collectors/base.py Normal file
View File

@@ -0,0 +1,17 @@
from dataclasses import dataclass
from typing import Dict, List
@dataclass
class CollectionResult:
    """Outcome of a single collector's run.

    Carries the discovered assets plus a coarse status so the orchestrator
    can persist per-source health without inspecting exceptions.
    """

    # Collector identifier, e.g. "proxmox", "docker", "coolify", "nginx".
    source: str
    # Discovered asset dicts; empty when disabled/skipped or on total failure.
    assets: List[Dict]
    # One of: "ok", "degraded", "error", "disabled", "skipped" (by convention
    # of the collectors in this package — not enforced here).
    status: str
    # Human-readable failure detail; empty string when nothing went wrong.
    error: str = ""
class BaseCollector:
    """Interface for inventory collectors.

    Subclasses override ``source_name`` and implement :meth:`collect`,
    returning a :class:`CollectionResult` rather than raising on partial
    failure.
    """

    # Overridden by subclasses; used as CollectionResult.source.
    source_name = "unknown"

    def collect(self) -> CollectionResult:
        """Gather assets from this collector's backend. Must be overridden."""
        raise NotImplementedError

146
collectors/coolify.py Normal file
View File

@@ -0,0 +1,146 @@
from typing import Dict, List
import requests
from config import AppConfig
from .base import BaseCollector, CollectionResult
class CoolifyCollector(BaseCollector):
    """Collects deployed applications from one or more Coolify instances.

    Queries ``/api/v1/applications`` on every configured endpoint using a
    bearer token and maps each application to a ``service`` asset.
    """

    source_name = "coolify"

    def __init__(self, config: AppConfig):
        self.config = config

    def collect(self) -> CollectionResult:
        """Fetch applications from all configured Coolify endpoints.

        Returns a result with status ``disabled``/``skipped`` when
        configuration is missing, ``error`` when every endpoint failed,
        ``degraded`` when only some failed, and ``ok`` otherwise.
        """
        if not self.config.coolify_enabled:
            return CollectionResult(source=self.source_name, assets=[], status="disabled")
        if not self.config.coolify_endpoints:
            return CollectionResult(source=self.source_name, assets=[], status="skipped", error="No COOLIFY_ENDPOINTS configured")
        if not self.config.coolify_api_token:
            return CollectionResult(source=self.source_name, assets=[], status="skipped", error="No COOLIFY_API_TOKEN configured")
        headers = {
            "Accept": "application/json",
            "Authorization": f"Bearer {self.config.coolify_api_token}",
        }
        assets: List[Dict] = []
        errors: List[str] = []
        for endpoint in self.config.coolify_endpoints:
            base = endpoint.rstrip("/")
            try:
                resp = requests.get(
                    f"{base}/api/v1/applications",
                    headers=headers,
                    timeout=self.config.request_timeout_seconds,
                )
                resp.raise_for_status()
                for app in self._extract_app_list(resp.json()):
                    app_status = self._derive_status(app)
                    assets.append(
                        {
                            "asset_type": "service",
                            "external_id": str(app.get("id", app.get("uuid", "unknown-app"))),
                            "name": app.get("name", "unknown-service"),
                            "hostname": app.get("fqdn") or app.get("name"),
                            "status": app_status,
                            "ip_addresses": [],
                            "node": endpoint,
                            "metadata": {
                                "coolify_uuid": app.get("uuid"),
                                "environment": app.get("environment_name"),
                                "repository": app.get("git_repository"),
                                "raw_status": app.get("status"),
                                "health": app.get("health"),
                                "deployment_status": app.get("deployment_status"),
                            },
                        }
                    )
            except Exception as exc:
                # Best-effort: one unreachable endpoint must not abort the rest.
                errors.append(f"{endpoint}: {exc}")
        if errors and not assets:
            return CollectionResult(source=self.source_name, assets=[], status="error", error=" | ".join(errors))
        if errors:
            return CollectionResult(source=self.source_name, assets=assets, status="degraded", error=" | ".join(errors))
        return CollectionResult(source=self.source_name, assets=assets, status="ok")

    @staticmethod
    def _extract_app_list(payload: object) -> List[Dict]:
        """Return the list of application dicts from an API payload.

        Accepts a bare list or a dict wrapping the list under one of several
        common envelope keys; anything else yields an empty list.
        """
        if isinstance(payload, list):
            return [item for item in payload if isinstance(item, dict)]
        if isinstance(payload, dict):
            for key in ("data", "applications", "items", "result"):
                value = payload.get(key)
                if isinstance(value, list):
                    return [item for item in value if isinstance(item, dict)]
        return []

    @staticmethod
    def _derive_status(app: Dict) -> str:
        """Derive a normalized status from whichever field the API populated."""
        candidate_fields = [
            app.get("status"),
            app.get("health"),
            app.get("deployment_status"),
            app.get("current_status"),
            app.get("state"),
        ]
        for value in candidate_fields:
            normalized = CoolifyCollector._normalize_status(value)
            if normalized != "unknown":
                return normalized
        # Fall back to boolean flags when no textual status field is usable.
        if app.get("is_running") is True or app.get("running") is True:
            return "running"
        if app.get("is_running") is False or app.get("running") is False:
            return "stopped"
        return "unknown"

    @staticmethod
    def _normalize_status(value: object) -> str:
        """Map an arbitrary status value onto "running"/"stopped"/"unknown".

        Unrecognized non-empty text is returned lowercased so callers can
        still surface it (``_derive_status`` treats anything != "unknown"
        as a usable answer).
        """
        if value is None:
            return "unknown"
        text = str(value).strip().lower()
        if not text:
            return "unknown"
        online = {
            "running",
            "online",
            "healthy",
            "up",
            "active",
            "ready",
            "started",
            "success",
            "completed",
        }
        offline = {
            "stopped",
            "offline",
            "down",
            "unhealthy",
            "error",
            "failed",
            "crashed",
            "dead",
            "exited",
        }
        if text in online:
            return "running"
        if text in offline:
            return "stopped"
        # Substring fallbacks for compound statuses like "running:healthy".
        # BUGFIX: "unhealthy" contains "healthy", so the stopped substrings
        # must be tested before the bare "healthy" test — the previous order
        # mapped e.g. "exited:unhealthy" to "running".
        if "running" in text:
            return "running"
        if "stop" in text or "fail" in text or "unhealthy" in text:
            return "stopped"
        if "healthy" in text:
            return "running"
        return text

152
collectors/docker_hosts.py Normal file
View File

@@ -0,0 +1,152 @@
from typing import Dict, List
import importlib
import requests
from config import AppConfig
from .base import BaseCollector, CollectionResult
class DockerHostsCollector(BaseCollector):
    """Collects containers from docker agents and docker hosts.

    Three transports are attempted, in order:
      * custom docker-agent HTTP endpoints (``/api/v1/containers``),
      * the Docker SDK for ``unix://`` / ``tcp://`` hosts,
      * the Docker Engine HTTP API (``/containers/json``) for other hosts.
    """

    source_name = "docker"

    def __init__(self, config: AppConfig):
        self.config = config

    def collect(self) -> CollectionResult:
        """Gather container assets from all configured agents and hosts.

        Returns ``disabled``/``skipped`` when configuration is missing,
        ``error`` when every source failed, ``degraded`` on partial failure,
        and ``ok`` otherwise.
        """
        if not self.config.docker_enabled:
            return CollectionResult(source=self.source_name, assets=[], status="disabled")
        if not self.config.docker_hosts and not self.config.docker_agent_endpoints:
            return CollectionResult(
                source=self.source_name,
                assets=[],
                status="skipped",
                error="No DOCKER_HOSTS or DOCKER_AGENT_ENDPOINTS configured",
            )
        assets: List[Dict] = []
        errors: List[str] = []
        headers = {"Accept": "application/json"}
        if self.config.docker_bearer_token:
            headers["Authorization"] = f"Bearer {self.config.docker_bearer_token}"
        agent_headers = {"Accept": "application/json"}
        if self.config.docker_agent_token:
            agent_headers["Authorization"] = f"Bearer {self.config.docker_agent_token}"
        for endpoint in self.config.docker_agent_endpoints:
            base = endpoint.rstrip("/")
            try:
                resp = requests.get(
                    f"{base}/api/v1/containers",
                    headers=agent_headers,
                    timeout=self.config.request_timeout_seconds,
                )
                resp.raise_for_status()
                payload = resp.json()
                # Agents may return either {"containers": [...]} or a bare list.
                containers = payload.get("containers", []) if isinstance(payload, dict) else payload
                for container in containers:
                    assets.append(
                        {
                            "asset_type": "container",
                            "external_id": container.get("id", "unknown-container"),
                            "name": container.get("name", "unknown"),
                            "hostname": container.get("name", "unknown"),
                            "status": container.get("state", "unknown"),
                            "ip_addresses": container.get("ip_addresses", []),
                            "node": endpoint,
                            "metadata": {
                                "image": container.get("image", "unknown"),
                                "ports": container.get("ports", []),
                                "networks": container.get("networks", []),
                                "labels": container.get("labels", {}),
                                "collected_via": "docker-agent",
                            },
                        }
                    )
            except Exception as exc:
                errors.append(f"{endpoint}: {exc}")
        for host in self.config.docker_hosts:
            if host.startswith("unix://") or host.startswith("tcp://"):
                # Socket-style URLs require the Docker SDK rather than plain HTTP.
                try:
                    assets.extend(self._collect_via_docker_sdk(host))
                except Exception as exc:
                    errors.append(f"{host}: {exc}")
                continue
            base = host.rstrip("/")
            try:
                resp = requests.get(
                    f"{base}/containers/json?all=1",
                    headers=headers,
                    timeout=self.config.request_timeout_seconds,
                )
                resp.raise_for_status()
                for container in resp.json():
                    ports = container.get("Ports", [])
                    networks = list((container.get("NetworkSettings", {}) or {}).get("Networks", {}).keys())
                    # The Engine API prefixes names with "/". Guard against a
                    # missing, empty, or null Names entry (the previous code
                    # raised on Names == [] or [None] for the hostname field).
                    names = container.get("Names") or ["unknown"]
                    container_name = (names[0] or "unknown").lstrip("/")
                    assets.append(
                        {
                            "asset_type": "container",
                            "external_id": container.get("Id", "unknown-container"),
                            "name": container_name,
                            "hostname": container_name,
                            "status": container.get("State", "unknown"),
                            "ip_addresses": [],
                            "node": host,
                            "metadata": {
                                "image": container.get("Image"),
                                "ports": ports,
                                "networks": networks,
                                "collected_via": "docker-host-api",
                            },
                        }
                    )
            except Exception as exc:
                errors.append(f"{host}: {exc}")
        if errors and not assets:
            return CollectionResult(source=self.source_name, assets=[], status="error", error=" | ".join(errors))
        if errors:
            return CollectionResult(source=self.source_name, assets=assets, status="degraded", error=" | ".join(errors))
        return CollectionResult(source=self.source_name, assets=assets, status="ok")

    def _collect_via_docker_sdk(self, host: str) -> List[Dict]:
        """List containers via the docker SDK for a single socket/tcp host.

        Raises RuntimeError when the SDK is not installed; other SDK errors
        propagate to the caller, which records them per-host.
        """
        try:
            # Imported lazily so the collector works without the SDK when
            # only HTTP hosts/agents are configured.
            docker_sdk = importlib.import_module("docker")
        except Exception as exc:
            raise RuntimeError(f"Docker SDK unavailable: {exc}") from exc
        assets: List[Dict] = []
        client = docker_sdk.DockerClient(base_url=host)
        try:
            for container in client.containers.list(all=True):
                ports = container.attrs.get("NetworkSettings", {}).get("Ports", {})
                networks = list((container.attrs.get("NetworkSettings", {}).get("Networks", {}) or {}).keys())
                state = container.attrs.get("State", {}).get("Status", "unknown")
                image_obj = container.image
                image_name = "unknown"
                if image_obj is not None:
                    # Prefer the first tag; untagged images fall back to the digest id.
                    image_tags = image_obj.tags or []
                    image_name = image_tags[0] if image_tags else image_obj.id
                assets.append(
                    {
                        "asset_type": "container",
                        "external_id": container.id,
                        "name": container.name,
                        "hostname": container.name,
                        "status": state,
                        "ip_addresses": [],
                        "node": host,
                        "metadata": {
                            "image": image_name,
                            "ports": ports,
                            "networks": networks,
                            "collected_via": "docker-sdk",
                        },
                    }
                )
        finally:
            client.close()
        return assets

80
collectors/nginx_from_agent.py Normal file
View File

@@ -0,0 +1,80 @@
from typing import Dict, List
import requests
from config import AppConfig
from .base import BaseCollector, CollectionResult
class NginxFromAgentCollector(BaseCollector):
    """Builds ``nginx_site`` assets from config data exposed by docker agents.

    Each agent's ``/api/v1/nginx-configs`` response describes parsed nginx
    config files; one asset is emitted per (config file, server_name) pair.
    """

    source_name = "nginx"

    def __init__(self, config: AppConfig):
        self.config = config

    def collect(self) -> CollectionResult:
        """Fetch nginx config summaries from every configured agent."""
        endpoints = self.config.docker_agent_endpoints
        if not endpoints:
            return CollectionResult(source=self.source_name, assets=[], status="skipped", error="No DOCKER_AGENT_ENDPOINTS configured")
        request_headers = {"Accept": "application/json"}
        if self.config.docker_agent_token:
            request_headers["Authorization"] = f"Bearer {self.config.docker_agent_token}"
        collected: List[Dict] = []
        failures: List[str] = []
        for agent_url in endpoints:
            root = agent_url.rstrip("/")
            try:
                response = requests.get(
                    f"{root}/api/v1/nginx-configs",
                    headers=request_headers,
                    timeout=self.config.request_timeout_seconds,
                )
                response.raise_for_status()
                body = response.json()
                site_configs = body.get("configs", []) if isinstance(body, dict) else []
                for site in site_configs:
                    conf_path = site.get("path", "unknown.conf")
                    vhost_names = site.get("server_names", []) or []
                    # A config with no server_name still gets one asset,
                    # keyed by its file path.
                    if not vhost_names:
                        vhost_names = [conf_path]
                    shared_metadata = {
                        "path": conf_path,
                        "listens": site.get("listens", []) or [],
                        "proxy_pass": site.get("proxy_pass", []) or [],
                        "proxy_pass_resolved": site.get("proxy_pass_resolved", []) or [],
                        "upstreams": site.get("upstreams", []) or [],
                        "upstream_servers": site.get("upstream_servers", []) or [],
                        "inferred_targets": site.get("inferred_targets", []) or [],
                        "collected_via": "docker-agent-nginx",
                    }
                    for vhost in vhost_names:
                        collected.append(
                            {
                                "asset_type": "nginx_site",
                                "external_id": f"{agent_url}:{conf_path}:{vhost}",
                                "name": vhost,
                                "hostname": vhost,
                                "status": "configured",
                                "ip_addresses": [],
                                "node": agent_url,
                                "metadata": dict(shared_metadata),
                            }
                        )
            except Exception as exc:
                failures.append(f"{agent_url}: {exc}")
        if failures and not collected:
            return CollectionResult(source=self.source_name, assets=[], status="error", error=" | ".join(failures))
        if failures:
            return CollectionResult(source=self.source_name, assets=collected, status="degraded", error=" | ".join(failures))
        return CollectionResult(source=self.source_name, assets=collected, status="ok")

73
collectors/orchestrator.py Normal file
View File

@@ -0,0 +1,73 @@
import threading
from dataclasses import dataclass
from typing import Dict, List
from config import AppConfig
from database import InventoryStore
from .base import CollectionResult
from .coolify import CoolifyCollector
from .docker_hosts import DockerHostsCollector
from .nginx_from_agent import NginxFromAgentCollector
from .proxmox import ProxmoxCollector
@dataclass
class RunReport:
    """Summary of one orchestrated collection run."""

    # Database id of the run row; -1 when another run was already in progress.
    run_id: int
    # Overall run status ("ok"/"error"), or "running" when skipped due to overlap.
    status: str
    # Per-collector results, in collector order; empty when the run was skipped.
    results: List[CollectionResult]
class InventoryCollectorOrchestrator:
    """Runs all collectors, persists results, and serves aggregated data.

    Holds one instance of each collector and a lock so that concurrent
    triggers (scheduler tick, manual refresh) never run at the same time.
    """

    def __init__(self, config: AppConfig, store: InventoryStore):
        self.config = config
        self.store = store
        # Non-blocking guard against overlapping collection runs.
        self._run_lock = threading.Lock()
        self.collectors = [
            ProxmoxCollector(config),
            DockerHostsCollector(config),
            CoolifyCollector(config),
            NginxFromAgentCollector(config),
        ]
        self.store.seed_sources(
            {
                "proxmox": config.proxmox_enabled,
                "docker": config.docker_enabled,
                "coolify": config.coolify_enabled,
                # nginx data comes via docker agents, so it is "enabled"
                # whenever any agent endpoint is configured.
                "nginx": bool(config.docker_agent_endpoints),
            }
        )

    def collect_once(self) -> RunReport:
        """Execute every collector once and persist the results.

        Returns a report with ``run_id=-1`` and status ``"running"`` when a
        run is already in progress. The overall status is ``"error"`` if any
        collector reported (or raised) an error, else ``"ok"``.
        """
        if not self._run_lock.acquire(blocking=False):
            return RunReport(run_id=-1, status="running", results=[])
        try:
            run_id = self.store.run_start()
            results: List[CollectionResult] = []
            errors: List[str] = []
            for collector in self.collectors:
                # A collector that raises must not abort the whole run and
                # leave the started run row unfinished; record the failure
                # as an error result and keep going.
                try:
                    result = collector.collect()
                except Exception as exc:
                    result = CollectionResult(
                        source=collector.source_name, assets=[], status="error", error=str(exc)
                    )
                results.append(result)
                self.store.set_source_status(result.source, result.status, result.error)
                if result.assets:
                    self.store.upsert_assets(result.source, result.assets)
                if result.status == "error":
                    errors.append(f"{result.source}: {result.error}")
            overall_status = "error" if errors else "ok"
            self.store.run_finish(run_id=run_id, status=overall_status, error_summary=" | ".join(errors))
            return RunReport(run_id=run_id, status=overall_status, results=results)
        finally:
            self._run_lock.release()

    def current_data(self) -> Dict:
        """Return the latest persisted inventory snapshot for API consumers."""
        return {
            "summary": self.store.summary(),
            "topology": self.store.topology(),
            "assets": self.store.list_assets(),
            "sources": self.store.source_health(),
            "last_run": self.store.last_run(),
        }

243
collectors/proxmox.py Normal file
View File

@@ -0,0 +1,243 @@
import ipaddress
import re
from typing import Dict, List
import requests
from config import AppConfig
from .base import BaseCollector, CollectionResult
class ProxmoxCollector(BaseCollector):
    """Collects VMs and LXC containers from Proxmox VE clusters.

    Authenticates per endpoint with either an API token or a
    username/password ticket, enumerates all nodes, and emits one asset per
    QEMU VM and per LXC container, including best-effort IP discovery.
    """

    source_name = "proxmox"

    def __init__(self, config: AppConfig):
        self.config = config

    def collect(self) -> CollectionResult:
        """Collect from every configured endpoint, aggregating failures."""
        if not self.config.proxmox_enabled:
            return CollectionResult(source=self.source_name, assets=[], status="disabled")
        if not self.config.proxmox_endpoints:
            return CollectionResult(source=self.source_name, assets=[], status="skipped", error="No PROXMOX_ENDPOINTS configured")
        assets: List[Dict] = []
        errors: List[str] = []
        for endpoint in self.config.proxmox_endpoints:
            try:
                assets.extend(self._collect_endpoint(endpoint))
            except Exception as exc:
                errors.append(f"{endpoint}: {exc}")
        if errors and not assets:
            return CollectionResult(source=self.source_name, assets=[], status="error", error=" | ".join(errors))
        if errors:
            return CollectionResult(source=self.source_name, assets=assets, status="degraded", error=" | ".join(errors))
        return CollectionResult(source=self.source_name, assets=assets, status="ok")

    def _collect_endpoint(self, endpoint: str) -> List[Dict]:
        """Enumerate all nodes, VMs, and LXC containers on one endpoint.

        Raises on authentication or node-list failures; per-guest IP lookups
        are best-effort and never raise.
        """
        endpoint = endpoint.rstrip("/")
        headers = {"Accept": "application/json"}
        cookies = None
        # Prefer API-token auth; fall back to a ticket (cookie + CSRF token).
        if self.config.proxmox_token_id and self.config.proxmox_token_secret:
            headers["Authorization"] = (
                f"PVEAPIToken={self.config.proxmox_token_id}={self.config.proxmox_token_secret}"
            )
        elif self.config.proxmox_user and self.config.proxmox_password:
            token_resp = requests.post(
                f"{endpoint}/api2/json/access/ticket",
                data={"username": self.config.proxmox_user, "password": self.config.proxmox_password},
                timeout=self.config.request_timeout_seconds,
                verify=self.config.proxmox_verify_tls,
            )
            token_resp.raise_for_status()
            payload = token_resp.json().get("data", {})
            cookies = {"PVEAuthCookie": payload.get("ticket", "")}
            csrf = payload.get("CSRFPreventionToken")
            if csrf:
                headers["CSRFPreventionToken"] = csrf
        nodes_resp = requests.get(
            f"{endpoint}/api2/json/nodes",
            headers=headers,
            cookies=cookies,
            timeout=self.config.request_timeout_seconds,
            verify=self.config.proxmox_verify_tls,
        )
        nodes_resp.raise_for_status()
        nodes = nodes_resp.json().get("data", [])
        assets: List[Dict] = []
        for node in nodes:
            node_name = node.get("node", "unknown-node")
            qemu_resp = requests.get(
                f"{endpoint}/api2/json/nodes/{node_name}/qemu",
                headers=headers,
                cookies=cookies,
                timeout=self.config.request_timeout_seconds,
                verify=self.config.proxmox_verify_tls,
            )
            qemu_resp.raise_for_status()
            for vm in qemu_resp.json().get("data", []):
                vm_id = str(vm.get("vmid", ""))
                vm_ips = self._collect_qemu_ips(endpoint, node_name, vm_id, headers, cookies)
                assets.append(
                    {
                        "asset_type": "vm",
                        "external_id": str(vm.get("vmid", vm.get("name", "unknown-vm"))),
                        "name": vm.get("name") or f"vm-{vm.get('vmid', 'unknown')}",
                        "hostname": vm.get("name"),
                        "status": vm.get("status", "unknown"),
                        "ip_addresses": vm_ips,
                        "node": node_name,
                        "cpu": vm.get("cpus"),
                        # Proxmox reports bytes; expose MiB / GiB.
                        "memory_mb": (vm.get("maxmem", 0) or 0) / (1024 * 1024),
                        "disk_gb": (vm.get("maxdisk", 0) or 0) / (1024 * 1024 * 1024),
                        "metadata": {
                            "endpoint": endpoint,
                            "uptime_seconds": vm.get("uptime"),
                        },
                    }
                )
            lxc_resp = requests.get(
                f"{endpoint}/api2/json/nodes/{node_name}/lxc",
                headers=headers,
                cookies=cookies,
                timeout=self.config.request_timeout_seconds,
                verify=self.config.proxmox_verify_tls,
            )
            lxc_resp.raise_for_status()
            for lxc in lxc_resp.json().get("data", []):
                lxc_id = str(lxc.get("vmid", ""))
                lxc_ips = self._collect_lxc_ips(endpoint, node_name, lxc_id, headers, cookies)
                assets.append(
                    {
                        "asset_type": "lxc",
                        "external_id": str(lxc.get("vmid", lxc.get("name", "unknown-lxc"))),
                        "name": lxc.get("name") or f"lxc-{lxc.get('vmid', 'unknown')}",
                        "hostname": lxc.get("name"),
                        "status": lxc.get("status", "unknown"),
                        "ip_addresses": lxc_ips,
                        "node": node_name,
                        "cpu": lxc.get("cpus"),
                        "memory_mb": (lxc.get("maxmem", 0) or 0) / (1024 * 1024),
                        "disk_gb": (lxc.get("maxdisk", 0) or 0) / (1024 * 1024 * 1024),
                        "metadata": {
                            "endpoint": endpoint,
                            "uptime_seconds": lxc.get("uptime"),
                        },
                    }
                )
        return assets

    def _collect_qemu_ips(self, endpoint: str, node_name: str, vm_id: str, headers: Dict, cookies: Dict | None) -> List[str]:
        """Best-effort IP discovery for a QEMU VM; never raises."""
        ips: List[str] = []
        # Guest agent provides the most accurate runtime IP list when enabled.
        try:
            agent_resp = requests.get(
                f"{endpoint}/api2/json/nodes/{node_name}/qemu/{vm_id}/agent/network-get-interfaces",
                headers=headers,
                cookies=cookies,
                timeout=self.config.request_timeout_seconds,
                verify=self.config.proxmox_verify_tls,
            )
            if agent_resp.ok:
                data = agent_resp.json().get("data", {})
                interfaces = data.get("result", []) if isinstance(data, dict) else []
                for interface in interfaces:
                    for addr in interface.get("ip-addresses", []) or []:
                        value = addr.get("ip-address")
                        if value:
                            ips.append(value)
        except Exception:
            # Agent may be absent or unresponsive; fall back to config IPs.
            pass
        ips.extend(self._collect_config_ips(endpoint, node_name, "qemu", vm_id, headers, cookies))
        return self._normalize_ips(ips)

    def _collect_lxc_ips(self, endpoint: str, node_name: str, vm_id: str, headers: Dict, cookies: Dict | None) -> List[str]:
        """Best-effort IP discovery for an LXC container; never raises."""
        ips: List[str] = []
        # Runtime interfaces capture DHCP-assigned addresses that are not present in static config.
        try:
            iface_resp = requests.get(
                f"{endpoint}/api2/json/nodes/{node_name}/lxc/{vm_id}/interfaces",
                headers=headers,
                cookies=cookies,
                timeout=self.config.request_timeout_seconds,
                verify=self.config.proxmox_verify_tls,
            )
            if iface_resp.ok:
                interfaces = iface_resp.json().get("data", [])
                if isinstance(interfaces, list):
                    for interface in interfaces:
                        inet_values = interface.get("inet")
                        if isinstance(inet_values, list):
                            ips.extend(inet_values)
        except Exception:
            pass
        ips.extend(self._collect_config_ips(endpoint, node_name, "lxc", vm_id, headers, cookies))
        return self._normalize_ips(ips)

    def _collect_config_ips(
        self,
        endpoint: str,
        node_name: str,
        vm_type: str,
        vm_id: str,
        headers: Dict,
        cookies: Dict | None,
    ) -> List[str]:
        """Extract IPv4 addresses from a guest's static config; never raises."""
        try:
            config_resp = requests.get(
                f"{endpoint}/api2/json/nodes/{node_name}/{vm_type}/{vm_id}/config",
                headers=headers,
                cookies=cookies,
                timeout=self.config.request_timeout_seconds,
                verify=self.config.proxmox_verify_tls,
            )
            if not config_resp.ok:
                return []
            config = config_resp.json().get("data", {})
        except Exception:
            return []
        values = []
        for key, value in config.items():
            if not isinstance(value, str):
                continue
            # netN lines (LXC) and ipconfigN lines (cloud-init) may embed IPs.
            # Generalized from the previous hardcoded ipconfig0..ipconfig3 set
            # so guests with more than four NICs are covered too.
            if key.startswith(("net", "ipconfig")):
                values.append(value)
        ips: List[str] = []
        for value in values:
            # Match dotted-quad IPv4, optionally with a CIDR suffix.
            ips.extend(re.findall(r"\b(?:\d{1,3}\.){3}\d{1,3}(?:/\d{1,2})?\b", value))
        return ips

    @staticmethod
    def _normalize_ips(values: List[str]) -> List[str]:
        """Deduplicate, validate, and strip CIDR suffixes; drops loopback.

        Order of first appearance is preserved; invalid addresses are skipped.
        """
        normalized: List[str] = []
        seen = set()
        for value in values:
            candidate = value.strip()
            if "/" in candidate:
                candidate = candidate.split("/", 1)[0]
            try:
                ip_obj = ipaddress.ip_address(candidate)
            except ValueError:
                continue
            if ip_obj.is_loopback:
                continue
            text = str(ip_obj)
            if text not in seen:
                seen.add(text)
                normalized.append(text)
        return normalized