feat: Add new status page
This commit is contained in:
29
hnsdoh_status/__init__.py
Normal file
29
hnsdoh_status/__init__.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from flask import Flask
|
||||
|
||||
from hnsdoh_status.config import Settings
|
||||
from hnsdoh_status.routes import create_routes
|
||||
from hnsdoh_status.scheduler import create_scheduler
|
||||
from hnsdoh_status.store import StatusStore
|
||||
|
||||
|
||||
# Module-level singleton: holds the one BackgroundScheduler shared by every
# create_app() call in this process (started lazily in create_app below).
scheduler = None
|
||||
|
||||
|
||||
def create_app() -> Flask:
    """Build and return the Flask application.

    Wires together settings, the in-memory status store, and the HTTP
    routes, and — exactly once per process — creates and starts the
    background check scheduler.
    """
    global scheduler

    flask_app = Flask(__name__)
    app_settings = Settings()
    status_store = StatusStore(history_size=app_settings.history_size)

    # Expose the shared objects via app.config so tests/extensions can reach them.
    flask_app.config["SETTINGS"] = app_settings
    flask_app.config["STORE"] = status_store

    create_routes(flask_app, app_settings, status_store)

    # Only the first create_app() call starts the scheduler; later calls
    # (e.g. from tests or multiple workers importing the module) reuse it.
    if scheduler is None:
        scheduler = create_scheduler(app_settings, status_store)
        scheduler.start()

    return flask_app
|
||||
250
hnsdoh_status/checks.py
Normal file
250
hnsdoh_status/checks.py
Normal file
@@ -0,0 +1,250 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import socket
|
||||
import ssl
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
|
||||
import dns.message
|
||||
import dns.query
|
||||
import dns.rcode
|
||||
import dns.rdatatype
|
||||
import dns.resolver
|
||||
|
||||
from hnsdoh_status.models import CheckResult, NodeSnapshot, ProtocolName, Snapshot
|
||||
|
||||
|
||||
def utcnow() -> datetime:
    """Return the current moment as a timezone-aware UTC datetime."""
    return datetime.now(tz=timezone.utc)
|
||||
|
||||
|
||||
def discover_nodes(domain: str) -> tuple[list[str], str]:
    """Resolve *domain*'s A records to enumerate the nodes to monitor.

    Returns:
        (addresses, error): a sorted list of unique IPv4 addresses and an
        empty error string on success; an empty list and the exception
        text when resolution fails.
    """
    resolver = dns.resolver.Resolver()
    try:
        answer = resolver.resolve(domain, "A")
    except Exception as exc:  # noqa: BLE001
        return [], str(exc)
    unique_addresses = {record.address for record in answer}
    return sorted(unique_addresses), ""
|
||||
|
||||
|
||||
def _check_dns_udp(ip: str, timeout: float, qname: str = "hnsdoh.com") -> CheckResult:
    """Probe plain DNS over UDP (port 53) on one node.

    Sends an A query for *qname* and reports success only when the
    response carries at least one answer record.

    Args:
        ip: Node address to query.
        timeout: Per-query timeout in seconds.
        qname: Name to resolve. Parameterized (default preserves the
            previously hard-coded probe name) so callers can probe other
            zones without editing this function.

    Returns:
        CheckResult with protocol "dns_udp". Any exception yields a
        failed result whose reason is the exception text.
    """
    started = time.perf_counter()
    checked_at = utcnow()
    query = dns.message.make_query(qname, dns.rdatatype.A)
    try:
        response = dns.query.udp(query, ip, timeout=timeout, port=53)
    except Exception as exc:  # noqa: BLE001
        return CheckResult("dns_udp", False, None, checked_at, str(exc))
    latency = (time.perf_counter() - started) * 1000
    return CheckResult(
        protocol="dns_udp",
        ok=bool(response.answer),
        latency_ms=latency,
        checked_at=checked_at,
        reason="ok" if response.answer else "empty answer",
    )
|
||||
|
||||
|
||||
def _check_dns_tcp(ip: str, timeout: float, qname: str = "hnsdoh.com") -> CheckResult:
    """Probe plain DNS over TCP (port 53) on one node.

    Sends an A query for *qname* and reports success only when the
    response carries at least one answer record.

    Args:
        ip: Node address to query.
        timeout: Per-query timeout in seconds.
        qname: Name to resolve. Parameterized (default preserves the
            previously hard-coded probe name) for consistency with the
            other probes and wider reuse.

    Returns:
        CheckResult with protocol "dns_tcp". Any exception yields a
        failed result whose reason is the exception text.
    """
    started = time.perf_counter()
    checked_at = utcnow()
    query = dns.message.make_query(qname, dns.rdatatype.A)
    try:
        response = dns.query.tcp(query, ip, timeout=timeout, port=53)
    except Exception as exc:  # noqa: BLE001
        return CheckResult("dns_tcp", False, None, checked_at, str(exc))
    latency = (time.perf_counter() - started) * 1000
    return CheckResult(
        protocol="dns_tcp",
        ok=bool(response.answer),
        latency_ms=latency,
        checked_at=checked_at,
        reason="ok" if response.answer else "empty answer",
    )
|
||||
|
||||
|
||||
def _tls_connection(ip: str, port: int, hostname: str, timeout: float) -> ssl.SSLSocket:
    """Open a certificate-verified TLS connection to ``ip:port``.

    *hostname* is used for SNI and certificate verification. The raw TCP
    socket is closed if the TLS wrap fails so no descriptor leaks; on
    success the caller owns (and must close) the returned TLS socket.
    """
    tls_context = ssl.create_default_context()
    plain_socket = socket.create_connection((ip, port), timeout=timeout)
    try:
        return tls_context.wrap_socket(plain_socket, server_hostname=hostname)
    except Exception:
        plain_socket.close()
        raise
|
||||
|
||||
|
||||
def _decode_chunked_body(data: bytes) -> bytes:
|
||||
output = bytearray()
|
||||
cursor = 0
|
||||
|
||||
while True:
|
||||
line_end = data.find(b"\r\n", cursor)
|
||||
if line_end < 0:
|
||||
raise ValueError("invalid chunk framing")
|
||||
|
||||
size_token = data[cursor:line_end].split(b";", maxsplit=1)[0].strip()
|
||||
size = int(size_token or b"0", 16)
|
||||
cursor = line_end + 2
|
||||
|
||||
if size == 0:
|
||||
break
|
||||
|
||||
next_cursor = cursor + size
|
||||
if next_cursor + 2 > len(data):
|
||||
raise ValueError("truncated chunk payload")
|
||||
output.extend(data[cursor:next_cursor])
|
||||
|
||||
if data[next_cursor : next_cursor + 2] != b"\r\n":
|
||||
raise ValueError("invalid chunk terminator")
|
||||
cursor = next_cursor + 2
|
||||
|
||||
return bytes(output)
|
||||
|
||||
|
||||
def _parse_http_response(response: bytes) -> tuple[str, dict[str, str], bytes]:
|
||||
head, separator, body = response.partition(b"\r\n\r\n")
|
||||
if not separator:
|
||||
raise ValueError("invalid HTTP response")
|
||||
|
||||
lines = head.split(b"\r\n")
|
||||
status_line = lines[0].decode("latin-1", errors="replace")
|
||||
headers: dict[str, str] = {}
|
||||
|
||||
for line in lines[1:]:
|
||||
if b":" not in line:
|
||||
continue
|
||||
key, value = line.split(b":", maxsplit=1)
|
||||
headers[key.decode("latin-1", errors="replace").lower()] = value.decode(
|
||||
"latin-1", errors="replace"
|
||||
).strip()
|
||||
|
||||
transfer_encoding = headers.get("transfer-encoding", "").lower()
|
||||
if "chunked" in transfer_encoding:
|
||||
body = _decode_chunked_body(body)
|
||||
|
||||
return status_line, headers, body
|
||||
|
||||
|
||||
def _check_doh(ip: str, hostname: str, path: str, timeout: float) -> CheckResult:
    """Probe DNS-over-HTTPS (RFC 8484 POST) on port 443 of one node.

    Speaks minimal HTTP/1.1 over a certificate-verified TLS connection:
    POSTs the DNS query in wire format and requires an HTTP 200 whose
    body parses as a DNS message with rcode NOERROR and a non-empty
    answer section.

    Args:
        ip: Node address to connect to.
        hostname: TLS SNI / Host header value, also used as the query name.
        path: DoH endpoint path (e.g. "/dns-query").
        timeout: Socket timeout in seconds.

    Returns:
        CheckResult with protocol "doh"; failures carry a diagnostic reason.
    """
    started = time.perf_counter()
    checked_at = utcnow()
    query = dns.message.make_query(hostname, dns.rdatatype.A)
    query_wire = query.to_wire()
    request = (
        f"POST {path} HTTP/1.1\r\n"
        f"Host: {hostname}\r\n"
        "Accept: application/dns-message\r\n"
        "Content-Type: application/dns-message\r\n"
        f"Content-Length: {len(query_wire)}\r\n"
        "Connection: close\r\n\r\n"
    ).encode("ascii") + query_wire

    try:
        with _tls_connection(ip, 443, hostname, timeout) as conn:
            conn.settimeout(timeout)
            conn.sendall(request)
            # "Connection: close" lets us read until EOF for the full response.
            response = b""
            while True:
                chunk = conn.recv(4096)
                if not chunk:
                    break
                response += chunk

        latency = (time.perf_counter() - started) * 1000
        status_line, _, body = _parse_http_response(response)
        # Parse the status-code token instead of substring-matching " 200 ",
        # which missed valid responses without a reason phrase ("HTTP/1.1 200").
        status_parts = status_line.split()
        status_ok = len(status_parts) >= 2 and status_parts[1] == "200"

        payload_ok = False
        reason = ""
        if status_ok and body:
            try:
                parsed_dns = dns.message.from_wire(body)
                payload_ok = parsed_dns.rcode() == dns.rcode.NOERROR and bool(
                    parsed_dns.answer
                )
                if payload_ok:
                    reason = "ok"
                elif parsed_dns.rcode() != dns.rcode.NOERROR:
                    reason = f"dns rcode {dns.rcode.to_text(parsed_dns.rcode())}"
                else:
                    reason = "empty answer"
            except Exception:  # noqa: BLE001
                reason = "invalid dns wireformat payload"
        elif status_ok:
            # HTTP 200 with no body: don't blame the (successful) HTTP status.
            reason = "empty http body"

        ok = status_ok and payload_ok
        if not reason:
            reason = f"http status failed: {status_line}"

        return CheckResult("doh", ok, latency, checked_at, reason)
    except ssl.SSLCertVerificationError as exc:
        return CheckResult("doh", False, None, checked_at, f"tls verify failed: {exc}")
    except Exception as exc:  # noqa: BLE001
        return CheckResult("doh", False, None, checked_at, str(exc))
|
||||
|
||||
|
||||
def _check_dot(
    ip: str, hostname: str, timeout: float, qname: str = "hnsdoh.com"
) -> CheckResult:
    """Probe DNS-over-TLS (port 853) on one node.

    Opens a certificate-verified TLS session (SNI = *hostname*) and sends
    an A query for *qname*; success requires a non-empty answer section.

    Args:
        ip: Node address to query.
        hostname: TLS SNI / certificate-verification name.
        timeout: Per-query timeout in seconds.
        qname: Name to resolve. Parameterized (default preserves the
            previously hard-coded probe name) for consistency with the
            plain-DNS probes.

    Returns:
        CheckResult with protocol "dot"; TLS verification failures are
        reported with a distinct "tls verify failed" reason.
    """
    started = time.perf_counter()
    checked_at = utcnow()
    query = dns.message.make_query(qname, dns.rdatatype.A)
    context = ssl.create_default_context()
    try:
        response = dns.query.tls(
            query,
            where=ip,
            timeout=timeout,
            port=853,
            ssl_context=context,
            server_hostname=hostname,
        )
    except ssl.SSLCertVerificationError as exc:
        return CheckResult("dot", False, None, checked_at, f"tls verify failed: {exc}")
    except Exception as exc:  # noqa: BLE001
        return CheckResult("dot", False, None, checked_at, str(exc))
    latency = (time.perf_counter() - started) * 1000
    return CheckResult(
        protocol="dot",
        ok=bool(response.answer),
        latency_ms=latency,
        checked_at=checked_at,
        reason="ok" if response.answer else "empty answer",
    )
|
||||
|
||||
|
||||
def check_node(
    ip: str,
    hostname: str,
    doh_path: str,
    dns_timeout: float,
    doh_timeout: float,
    dot_timeout: float,
) -> NodeSnapshot:
    """Run all four protocol probes against one node, sequentially.

    Probe order (UDP, TCP, DoH, DoT) matches the status-page columns.
    """
    udp_result = _check_dns_udp(ip, dns_timeout)
    tcp_result = _check_dns_tcp(ip, dns_timeout)
    doh_result = _check_doh(ip, hostname, doh_path, doh_timeout)
    dot_result = _check_dot(ip, hostname, dot_timeout)
    return NodeSnapshot(
        ip=ip,
        results={
            "dns_udp": udp_result,
            "dns_tcp": tcp_result,
            "doh": doh_result,
            "dot": dot_result,
        },
    )
|
||||
|
||||
|
||||
def run_full_check(
    domain: str,
    doh_path: str,
    dns_timeout: float,
    doh_timeout: float,
    dot_timeout: float,
) -> Snapshot:
    """Discover nodes from *domain*'s A records and probe each of them.

    The snapshot timestamp is taken before discovery, so it marks the
    start of the whole check cycle. A discovery failure yields a snapshot
    with zero nodes and a non-empty ``discovery_error``.
    """
    checked_at = utcnow()
    nodes, discovery_error = discover_nodes(domain)
    snapshots = [
        check_node(
            node_ip, domain, doh_path, dns_timeout, doh_timeout, dot_timeout
        )
        for node_ip in nodes
    ]
    return Snapshot(
        domain=domain,
        checked_at=checked_at,
        node_count=len(snapshots),
        nodes=snapshots,
        discovery_error=discovery_error,
    )
|
||||
25
hnsdoh_status/config.py
Normal file
25
hnsdoh_status/config.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Settings:
    """Immutable runtime configuration read from the environment.

    Defaults are evaluated once, when this module is imported (after the
    load_dotenv() call above has run), so environment changes made after
    import are not observed by new Settings() instances.
    """

    # Domain whose A records enumerate the nodes to monitor.
    domain: str = os.getenv("HNSDOH_DOMAIN", "hnsdoh.com")
    # HTTP path of the DoH endpoint on each node.
    doh_path: str = os.getenv("HNSDOH_DOH_PATH", "/dns-query")
    # Seconds between background check cycles.
    check_interval_seconds: int = int(os.getenv("HNSDOH_CHECK_INTERVAL_SECONDS", "300"))
    # Browser auto-refresh cadence for the status page, seconds.
    ui_refresh_seconds: int = int(os.getenv("HNSDOH_UI_REFRESH_SECONDS", "30"))
    # Number of per-node result sets retained for the history display.
    history_size: int = int(os.getenv("HNSDOH_HISTORY_SIZE", "12"))
    # Snapshot age beyond which the health endpoint reports the data stale.
    stale_after_seconds: int = int(os.getenv("HNSDOH_STALE_AFTER_SECONDS", "900"))

    # Per-protocol probe timeouts, in seconds.
    dns_timeout_seconds: float = float(os.getenv("HNSDOH_DNS_TIMEOUT_SECONDS", "5"))
    doh_timeout_seconds: float = float(os.getenv("HNSDOH_DOH_TIMEOUT_SECONDS", "10"))
    dot_timeout_seconds: float = float(os.getenv("HNSDOH_DOT_TIMEOUT_SECONDS", "10"))

    # NOTE(review): no consumer of this webhook URL is visible in this
    # commit — presumably for alerting; confirm before removing.
    webhook_url: str = os.getenv("TMP_DISCORD_HOOK", "")
|
||||
40
hnsdoh_status/models.py
Normal file
40
hnsdoh_status/models.py
Normal file
@@ -0,0 +1,40 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Literal
|
||||
|
||||
ProtocolName = Literal["dns_udp", "dns_tcp", "doh", "dot"]
|
||||
|
||||
|
||||
@dataclass
class CheckResult:
    """Outcome of a single protocol probe against one node."""

    # Which probe produced this result ("dns_udp" | "dns_tcp" | "doh" | "dot").
    protocol: ProtocolName
    # True when the probe succeeded end-to-end.
    ok: bool
    # Round-trip time in milliseconds; None when the probe raised.
    latency_ms: float | None
    # Timestamp taken when the probe started.
    checked_at: datetime
    # Human-readable detail, e.g. "ok", "empty answer", or an error message.
    reason: str = ""
|
||||
|
||||
|
||||
@dataclass
class NodeSnapshot:
    """All protocol probe results for one node in one check cycle."""

    # Node address as discovered from the monitored domain's A records.
    ip: str
    # One CheckResult per probed protocol.
    results: dict[ProtocolName, CheckResult] = field(default_factory=dict)
|
||||
|
||||
|
||||
@dataclass
class Snapshot:
    """Result of one full check cycle across every discovered node."""

    # Domain whose A records were used for node discovery.
    domain: str
    # When the check cycle started.
    checked_at: datetime
    # Convenience count, equal to len(nodes).
    node_count: int
    # Per-node probe results.
    nodes: list[NodeSnapshot]
    # Non-empty when DNS discovery itself failed (nodes will then be empty).
    discovery_error: str = ""
|
||||
|
||||
|
||||
@dataclass
class StatusPayload:
    """Shape of the /api/status response before JSON serialization.

    NOTE(review): the status route currently builds this payload as an
    inline dict rather than instantiating this class — kept here as the
    documented contract of that endpoint; confirm intent.
    """

    # When the payload was generated (server time).
    generated_at: datetime
    # Configured background check interval, seconds.
    interval_seconds: int
    # Staleness threshold used by the health endpoint, seconds.
    stale_after_seconds: int
    # Most recent snapshot, or None before the first check completes.
    current: Snapshot | None
    # Per-IP history of protocol result sets, oldest first.
    history: dict[str, list[dict[ProtocolName, CheckResult]]]
|
||||
105
hnsdoh_status/routes.py
Normal file
105
hnsdoh_status/routes.py
Normal file
@@ -0,0 +1,105 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any
|
||||
|
||||
from flask import Blueprint, Flask, Response, jsonify, render_template
|
||||
|
||||
from hnsdoh_status.config import Settings
|
||||
from hnsdoh_status.models import CheckResult, ProtocolName, Snapshot
|
||||
from hnsdoh_status.store import StatusStore
|
||||
|
||||
|
||||
def _result_to_dict(result: CheckResult) -> dict[str, Any]:
|
||||
return {
|
||||
"protocol": result.protocol,
|
||||
"ok": result.ok,
|
||||
"latency_ms": round(result.latency_ms, 2)
|
||||
if result.latency_ms is not None
|
||||
else None,
|
||||
"checked_at": result.checked_at.isoformat(),
|
||||
"reason": result.reason,
|
||||
}
|
||||
|
||||
|
||||
def _snapshot_to_dict(snapshot: Snapshot | None) -> dict | None:
|
||||
if snapshot is None:
|
||||
return None
|
||||
|
||||
return {
|
||||
"domain": snapshot.domain,
|
||||
"checked_at": snapshot.checked_at.isoformat(),
|
||||
"node_count": snapshot.node_count,
|
||||
"discovery_error": snapshot.discovery_error,
|
||||
"nodes": [
|
||||
{
|
||||
"ip": node.ip,
|
||||
"results": {
|
||||
protocol: _result_to_dict(result)
|
||||
for protocol, result in node.results.items()
|
||||
},
|
||||
}
|
||||
for node in snapshot.nodes
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
def _history_to_dict(
|
||||
history: dict[str, list[dict[ProtocolName, CheckResult]]],
|
||||
) -> dict[str, list[dict[str, dict]]]:
|
||||
rendered: dict[str, list[dict[str, dict]]] = {}
|
||||
for ip, entries in history.items():
|
||||
rendered[ip] = []
|
||||
for entry in entries:
|
||||
rendered[ip].append(
|
||||
{
|
||||
protocol: _result_to_dict(result)
|
||||
for protocol, result in entry.items()
|
||||
}
|
||||
)
|
||||
return rendered
|
||||
|
||||
|
||||
def create_routes(app: Flask, settings: Settings, store: StatusStore) -> Blueprint:
    """Register the UI and API routes on *app* and return the blueprint.

    Routes: "/" (server-rendered shell), "/api/health" (liveness with
    staleness check), "/api/status" (full payload for the frontend poller).
    """
    bp = Blueprint("status", __name__)

    @bp.get("/")
    def index() -> str:
        # Server-rendered shell; app.js fills in live data client-side.
        return render_template(
            "index.html",
            domain=settings.domain,
            ui_refresh_seconds=settings.ui_refresh_seconds,
        )

    @bp.get("/api/health")
    def health() -> tuple[dict, int]:
        # 503 until the first check completes, or when the newest snapshot
        # is older than the configured staleness threshold.
        current = store.current()
        if current is None:
            return {"ok": False, "reason": "No checks completed yet."}, 503

        now = datetime.now(timezone.utc)
        age_seconds = (now - current.checked_at).total_seconds()
        stale = age_seconds > settings.stale_after_seconds
        payload = {
            "ok": not stale,
            "stale": stale,
            "age_seconds": age_seconds,
            "checked_at": current.checked_at.isoformat(),
        }
        return payload, 503 if stale else 200

    @bp.get("/api/status")
    def status() -> Response:
        # Full payload consumed by the frontend poller (see static/app.js).
        payload = {
            "generated_at": store.generated_at().isoformat(),
            "interval_seconds": settings.check_interval_seconds,
            "stale_after_seconds": settings.stale_after_seconds,
            "current": _snapshot_to_dict(store.current()),
            "history": _history_to_dict(store.history()),
        }
        return jsonify(payload)

    app.register_blueprint(bp)
    return bp
|
||||
34
hnsdoh_status/scheduler.py
Normal file
34
hnsdoh_status/scheduler.py
Normal file
@@ -0,0 +1,34 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from apscheduler.schedulers.background import BackgroundScheduler
|
||||
|
||||
from hnsdoh_status.checks import run_full_check
|
||||
from hnsdoh_status.config import Settings
|
||||
from hnsdoh_status.store import StatusStore
|
||||
|
||||
|
||||
def create_scheduler(settings: Settings, store: StatusStore) -> BackgroundScheduler:
    """Build (without starting) the background scheduler that feeds *store*.

    Runs one synchronous check before returning so the UI/API has data
    immediately; note this blocks app startup for the duration of that
    first check cycle.
    """
    scheduler = BackgroundScheduler(daemon=True)

    def run_checks() -> None:
        # One full discovery + probe cycle, published to the shared store.
        store.update(
            run_full_check(
                domain=settings.domain,
                doh_path=settings.doh_path,
                dns_timeout=settings.dns_timeout_seconds,
                doh_timeout=settings.doh_timeout_seconds,
                dot_timeout=settings.dot_timeout_seconds,
            )
        )

    # Populate the store once, synchronously, before any scheduling.
    run_checks()

    scheduler.add_job(
        run_checks,
        "interval",
        seconds=settings.check_interval_seconds,
        id="node-health-check",
        max_instances=1,  # never run two check cycles concurrently
        coalesce=True,  # collapse missed runs into a single execution
    )
    return scheduler
|
||||
75
hnsdoh_status/static/app.js
Normal file
75
hnsdoh_status/static/app.js
Normal file
@@ -0,0 +1,75 @@
|
||||
// Poll cadence injected by the template (index.html); falls back to 30s.
const refreshSeconds = window.HNSDOH_UI_REFRESH_SECONDS || 30;
|
||||
|
||||
// Escape text destined for innerHTML so server-supplied strings (e.g. raw
// exception messages in `reason`) cannot inject markup into the page.
function escapeHtml(text) {
  return String(text).replace(/[&<>"']/g, (ch) => (
    { "&": "&amp;", "<": "&lt;", ">": "&gt;", '"': "&quot;", "'": "&#39;" }[ch]
  ));
}

// Render an UP/DOWN badge (with latency when known) plus a reason hint for
// one protocol result. A missing result renders as DOWN instead of throwing
// and blanking the whole table.
function badgeFor(result) {
  if (!result) {
    return '<span class="badge badge-bad">DOWN</span><span class="hint">no result</span>';
  }
  const badgeClass = result.ok ? "badge badge-ok" : "badge badge-bad";
  const label = result.ok ? "UP" : "DOWN";
  const latency = result.latency_ms === null ? "" : ` (${result.latency_ms} ms)`;
  const reason = escapeHtml(result.reason || "");

  return `
    <span class="${badgeClass}">${label}${latency}</span>
    <span class="hint">${reason}</span>
  `;
}
|
||||
|
||||
// Render up to the last 10 history entries for one protocol as a row of
// colored dots; an entry lacking that protocol renders a neutral gray dot.
function historyDots(history, protocol) {
  if (!history || history.length === 0) {
    return "";
  }

  const pieces = [];
  for (const entry of history.slice(-10)) {
    const result = entry[protocol];
    if (!result) {
      pieces.push('<span class="dot"></span>');
    } else {
      pieces.push(`<span class="dot ${result.ok ? "ok" : "bad"}"></span>`);
    }
  }

  return `<div class="history">${pieces.join("")}</div>`;
}
|
||||
|
||||
// Build one table row for a node: IP cell plus one badge + history-dot cell
// per protocol, in the same column order as the table header.
function rowForNode(node, history) {
  const { dns_udp, dns_tcp, doh, dot } = node.results;

  return `
    <tr>
      <td>${node.ip}</td>
      <td>${badgeFor(dns_udp)}${historyDots(history, "dns_udp")}</td>
      <td>${badgeFor(dns_tcp)}${historyDots(history, "dns_tcp")}</td>
      <td>${badgeFor(doh)}${historyDots(history, "doh")}</td>
      <td>${badgeFor(dot)}${historyDots(history, "dot")}</td>
    </tr>
  `;
}
|
||||
|
||||
// Fetch /api/status and redraw the table; on any failure, surface the error
// inside the table body instead of leaving stale rows on screen.
async function refreshStatus() {
  const tableBody = document.getElementById("status-table-body");
  try {
    const response = await fetch("/api/status", { cache: "no-store" });
    const data = await response.json();
    const lastUpdated = document.getElementById("last-updated");

    // Before the first server-side check completes, `current` is null.
    if (!data.current) {
      tableBody.innerHTML = '<tr><td colspan="5">No data yet. Waiting for first check.</td></tr>';
      return;
    }

    lastUpdated.textContent = `Last updated: ${data.current.checked_at}`;

    const rows = data.current.nodes
      .map((node) => rowForNode(node, data.history[node.ip] || []))
      .join("");

    tableBody.innerHTML = rows || '<tr><td colspan="5">No nodes discovered.</td></tr>';
  } catch (error) {
    tableBody.innerHTML = `<tr><td colspan="5">Failed to load status: ${error}</td></tr>`;
  }
}
|
||||
|
||||
// Draw immediately on load, then poll at the configured cadence.
refreshStatus();
setInterval(refreshStatus, refreshSeconds * 1000);
|
||||
BIN
hnsdoh_status/static/icons/HNS.png
Normal file
BIN
hnsdoh_status/static/icons/HNS.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 22 KiB |
BIN
hnsdoh_status/static/icons/HNSW.png
Normal file
BIN
hnsdoh_status/static/icons/HNSW.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 29 KiB |
189
hnsdoh_status/static/style.css
Normal file
189
hnsdoh_status/static/style.css
Normal file
@@ -0,0 +1,189 @@
|
||||
/* Theme palette (light-mode defaults). */
:root {
  --bg: #f0f7f4;
  --paper: #ffffff;
  --ink: #16302b;
  --muted: #4b635d;
  --accent: #1f7a8c;
  --ok: #12824c;
  --bad: #ba2d0b;
  --border: #d6e8e1;
}

/* Dark-mode palette overrides. */
@media (prefers-color-scheme: dark) {
  :root {
    --bg: #09131a;
    --paper: #10222c;
    --ink: #e3f2f6;
    --muted: #93acb6;
    --accent: #69bfd6;
    --ok: #62d387;
    --bad: #ff8c73;
    --border: #214250;
  }
}

* {
  box-sizing: border-box;
}

/* Page base: layered radial highlights over the theme background. */
body {
  margin: 0;
  font-family: "IBM Plex Sans", "Segoe UI", sans-serif;
  color: var(--ink);
  background:
    radial-gradient(circle at 10% 10%, #cfeadf 0, transparent 32%),
    radial-gradient(circle at 90% 0%, #b4d6e3 0, transparent 28%),
    var(--bg);
}

@media (prefers-color-scheme: dark) {
  body {
    background:
      radial-gradient(circle at 10% 10%, #143342 0, transparent 35%),
      radial-gradient(circle at 90% 0%, #233a46 0, transparent 30%),
      var(--bg);
  }
}

/* Centered content column. */
.layout {
  max-width: 1100px;
  margin: 2rem auto;
  padding: 0 1rem 2rem;
}

/* Header card. */
.hero {
  background: linear-gradient(130deg, #e2f4eb 0%, #e7f0ff 100%);
  border: 1px solid var(--border);
  border-radius: 14px;
  padding: 1rem 1.25rem;
}

@media (prefers-color-scheme: dark) {
  .hero {
    background: linear-gradient(135deg, #123342 0%, #182f3c 100%);
  }
}

.hero h1 {
  margin: 0;
  font-size: 1.8rem;
}

.hero p {
  margin-top: 0.3rem;
  color: var(--muted);
}

/* "Last updated" / refresh info row inside the header. */
.meta {
  display: flex;
  gap: 1rem;
  font-size: 0.9rem;
  color: var(--muted);
}

/* Card wrapping the status table. */
.panel {
  margin-top: 1rem;
  background: var(--paper);
  border: 1px solid var(--border);
  border-radius: 14px;
  overflow: hidden;
}

table {
  width: 100%;
  border-collapse: collapse;
}

th,
td {
  text-align: left;
  padding: 0.75rem;
  border-bottom: 1px solid var(--border);
}

th {
  font-size: 0.9rem;
  color: var(--muted);
  letter-spacing: 0.02em;
}

/* UP/DOWN status pills rendered by app.js badgeFor(). */
.badge {
  display: inline-flex;
  align-items: center;
  gap: 0.4rem;
  padding: 0.3rem 0.55rem;
  border-radius: 999px;
  font-size: 0.8rem;
  border: 1px solid transparent;
}

.badge-ok {
  color: var(--ok);
  background: #e7f7ef;
  border-color: #b6e3ca;
}

@media (prefers-color-scheme: dark) {
  .badge-ok {
    background: #173b2a;
    border-color: #2f6c4f;
  }
}

.badge-bad {
  color: var(--bad);
  background: #fdece7;
  border-color: #f3b9aa;
}

@media (prefers-color-scheme: dark) {
  .badge-bad {
    background: #472118;
    border-color: #855040;
  }
}

/* Failure-reason line under each badge. */
.hint {
  display: block;
  margin-top: 0.15rem;
  color: var(--muted);
  font-size: 0.75rem;
}

/* Row of recent-result dots rendered by app.js historyDots(). */
.history {
  margin-top: 0.35rem;
  display: flex;
  gap: 0.2rem;
}

.dot {
  width: 7px;
  height: 7px;
  border-radius: 50%;
  background: #adbcb6; /* neutral gray = no data for that entry */
}

.dot.ok {
  background: var(--ok);
}

.dot.bad {
  background: var(--bad);
}

@media (prefers-color-scheme: dark) {
  th,
  td {
    border-bottom-color: #1f3f4c;
  }
}

/* Narrow screens: let the table scroll horizontally inside the panel. */
@media (max-width: 880px) {
  .panel {
    overflow-x: auto;
  }

  table {
    min-width: 760px;
  }
}
|
||||
30
hnsdoh_status/store.py
Normal file
30
hnsdoh_status/store.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import defaultdict, deque
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from hnsdoh_status.models import CheckResult, ProtocolName, Snapshot
|
||||
|
||||
|
||||
class StatusStore:
    """In-memory holder for the latest Snapshot plus bounded per-node history.

    History is one protocol→result mapping per node per check cycle,
    capped at ``history_size`` entries per node (oldest dropped first).
    NOTE(review): no explicit locking; confirm single-writer usage if the
    scheduler ever runs overlapping jobs.
    """

    def __init__(self, history_size: int):
        # Bounded deques drop the oldest entry automatically once full.
        self._max_entries = history_size
        self._latest: Snapshot | None = None
        self._per_node: dict[str, deque[dict[ProtocolName, CheckResult]]] = defaultdict(
            lambda: deque(maxlen=self._max_entries)
        )

    def update(self, snapshot: Snapshot) -> None:
        """Publish *snapshot* and append each node's results to its history."""
        self._latest = snapshot
        for node in snapshot.nodes:
            # Copy the mapping so later mutation of the snapshot cannot
            # rewrite an already-stored history entry.
            self._per_node[node.ip].append(dict(node.results))

    def current(self) -> Snapshot | None:
        """Return the most recent snapshot, or None before the first check."""
        return self._latest

    def history(self) -> dict[str, list[dict[ProtocolName, CheckResult]]]:
        """Return a plain-list copy of the history for every node seen so far."""
        return {ip: list(entries) for ip, entries in self._per_node.items()}

    def generated_at(self) -> datetime:
        """Timestamp used for API payload generation (current UTC time)."""
        return datetime.now(timezone.utc)
|
||||
56
hnsdoh_status/templates/index.html
Normal file
56
hnsdoh_status/templates/index.html
Normal file
@@ -0,0 +1,56 @@
|
||||
<!doctype html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <title>HNSDoH Status</title>
    <!-- Light/dark favicons: the media query selects HNSW.png in dark mode. -->
    <link
      rel="icon"
      type="image/png"
      href="{{ url_for('static', filename='icons/HNS.png') }}"
    />
    <link
      rel="icon"
      type="image/png"
      media="(prefers-color-scheme: dark)"
      href="{{ url_for('static', filename='icons/HNSW.png') }}"
    />
    <link
      rel="apple-touch-icon"
      href="{{ url_for('static', filename='icons/HNS.png') }}"
    />
    <link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}" />
  </head>
  <body>
    <main class="layout">
      <header class="hero">
        <h1>HNSDoH Status</h1>
        <p>Live checks for {{ domain }} nodes discovered from DNS A records.</p>
        <div class="meta">
          <!-- Updated client-side by app.js after each poll. -->
          <span id="last-updated">Last updated: pending</span>
          <span>Refresh: {{ ui_refresh_seconds }}s</span>
        </div>
      </header>

      <section class="panel">
        <table>
          <thead>
            <tr>
              <th>Node IP</th>
              <th>DNS UDP :53</th>
              <th>DNS TCP :53</th>
              <th>DoH :443</th>
              <th>DoT :853</th>
            </tr>
          </thead>
          <!-- Rows are rendered client-side by app.js from /api/status. -->
          <tbody id="status-table-body"></tbody>
        </table>
      </section>
    </main>

    <!-- Hand the template's refresh cadence to the frontend poller. -->
    <script>
      window.HNSDOH_UI_REFRESH_SECONDS = {{ ui_refresh_seconds }};
    </script>
    <script src="{{ url_for('static', filename='app.js') }}"></script>
  </body>
</html>
|
||||
Reference in New Issue
Block a user