parent cb4c94a0b4
commit 2530c2ce1c

 main.py | 67
@@ -1,12 +1,14 @@
import time
import signal
import threading
from flask import Flask
from server import app
import server
from gunicorn.app.base import BaseApplication
import os
import dotenv
import sys
import json
import concurrent.futures
import schedule


class GunicornApp(BaseApplication):
@@ -23,20 +25,63 @@ class GunicornApp(BaseApplication):
    def load(self):
        return self.application

if __name__ == '__main__':
    workers = os.getenv('WORKERS')
    threads = os.getenv('THREADS')
    if workers is None:
        workers = 1
    if threads is None:
        threads = 2

def check():
    print('Checking nodes...', flush=True)
    server.check_nodes()


def run_scheduler(stop_event):
    schedule.every(1).minutes.do(check)
    while not stop_event.is_set():
        schedule.run_pending()
        time.sleep(1)


def run_gunicorn():
    workers = os.getenv('WORKERS', 1)
    threads = os.getenv('THREADS', 2)
    workers = int(workers)
    threads = int(threads)

    options = {
        'bind': '0.0.0.0:5000',
        'workers': workers,
        'threads': threads,
    }
    gunicorn_app = GunicornApp(app, options)
    print('Starting server with ' + str(workers) + ' workers and ' + str(threads) + ' threads', flush=True)

    gunicorn_app = GunicornApp(server.app, options)
    print(f'Starting server with {workers} workers and {threads} threads', flush=True)
    gunicorn_app.run()


def signal_handler(sig, frame):
    print("Shutting down gracefully...", flush=True)
    stop_event.set()


if __name__ == '__main__':
    dotenv.load_dotenv()

    stop_event = threading.Event()
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Start the scheduler
        scheduler_future = executor.submit(run_scheduler, stop_event)

        try:
            # Run the Gunicorn server
            run_gunicorn()
        except KeyboardInterrupt:
            print("Shutting down server...", flush=True)
        finally:
            stop_event.set()
            scheduler_future.cancel()
            try:
                scheduler_future.result(timeout=5)
            except concurrent.futures.CancelledError:
                print("Scheduler stopped.")
            except Exception as e:
                print(f"Scheduler did not stop cleanly: {e}")

 server.py | 229
@@ -1,3 +1,4 @@
from collections import defaultdict
from functools import cache
import json
from flask import (
@@ -207,7 +208,6 @@ def verify_cert(ip: str,port:int) -> bool:

    # Convert the expiry date string to a datetime object
    expiry_date = datetime.strptime(expiry_date_str, '%b %d %H:%M:%S %Y GMT')
    print(expiry_date)
    expires = format_relative_time(expiry_date)
    valid = expiry_date > datetime.now()

@@ -234,6 +234,52 @@ def format_relative_time(expiry_date: datetime) -> str:
        return f"in {minutes} minutes" if minutes > 1 else "in 1 minute"
    else:
        return f"in {delta.seconds} seconds" if delta.seconds > 1 else "in 1 second"

def check_nodes() -> list:
    global nodes
    if last_log > datetime.now() - relativedelta.relativedelta(minutes=1):
        # Load the last log
        with open(f"{log_dir}/node_status.json", "r") as file:
            data = json.load(file)
        newest = {"date": datetime.now()-relativedelta.relativedelta(years=1), "nodes": []}
        for entry in data:
            if datetime.strptime(entry["date"], "%Y-%m-%d %H:%M:%S") > newest["date"]:
                newest = entry
                newest["date"] = datetime.strptime(newest["date"], "%Y-%m-%d %H:%M:%S")
        node_status = newest["nodes"]
    else:
        if len(nodes) == 0:
            nodes = get_node_list()
            node_status = []
            for ip in nodes:
                node_status.append({
                    "ip": ip,
                    "name": node_names[ip] if ip in node_names else ip,
                    "location": node_locations[ip] if ip in node_locations else "Unknown",
                    "plain_dns": check_plain_dns(ip),
                    "doh": check_doh(ip),
                    "dot": check_dot(ip),
                    "cert": verify_cert(ip,443),
                    "cert_853": verify_cert(ip,853)
                })
        else:
            node_status = []
            for ip in nodes:
                node_status.append({
                    "ip": ip,
                    "name": node_names[ip] if ip in node_names else ip,
                    "location": node_locations[ip] if ip in node_locations else "Unknown",
                    "plain_dns": check_plain_dns(ip),
                    "doh": check_doh(ip),
                    "dot": check_dot(ip),
                    "cert": verify_cert(ip,443),
                    "cert_853": verify_cert(ip,853)
                })
        # Save the node status to a file
        log_status(node_status)
    print("Finished checking nodes", flush=True)
    return node_status

# endregion
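
Note: check_nodes() above and the history functions below share the node_status.json log under log_dir. A minimal sketch of one log entry as those readers expect it; the field names come from this diff, while the concrete values and the exact status/cert representations are illustrative assumptions:

example_entry = {
    "date": "2024-01-01 12:00:00",   # parsed with "%Y-%m-%d %H:%M:%S"
    "nodes": [
        {
            "ip": "192.0.2.1",       # placeholder address
            "name": "node-1",
            "location": "Unknown",
            "plain_dns": "up",       # summarize_history() only treats the value "down" as downtime
            "doh": "up",
            "dot": "down",
            "cert": {"valid": True, "expires": "in 30 days"},      # assumed shape; template reads cert.valid / cert.expires
            "cert_853": {"valid": True, "expires": "in 30 days"},
        }
    ],
}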

# region File logs
@@ -275,8 +321,128 @@ def log_status(node_status: list):
    with open(filename, "w") as file:
        json.dump(data, file, indent=4)



# endregion
# region History functions
def get_history(days: int) -> list:
    log_files = [f for f in os.listdir(log_dir) if f.endswith('.json') and f.startswith('node_status')]
    history = []

    for log_file in log_files:
        file_path = os.path.join(log_dir, log_file)
        with open(file_path, 'r') as file:
            data = json.load(file)
        for entry in data:
            entry_date = datetime.strptime(entry["date"], "%Y-%m-%d %H:%M:%S")
            if datetime.now() - relativedelta.relativedelta(days=days) < entry_date:
                history.append(entry)
    return history

def summarize_history(history: list) -> dict:
    nodes_status = defaultdict(lambda: {
        "name": "",
        "location": "",
        "plain_dns": {"last_down": "Never", "percentage": 0},
        "doh": {"last_down": "Never", "percentage": 0},
        "dot": {"last_down": "Never", "percentage": 0}
    })
    overall_status = {
        "plain_dns": {"last_down": "Never", "percentage": 0},
        "doh": {"last_down": "Never", "percentage": 0},
        "dot": {"last_down": "Never", "percentage": 0}
    }

    # Collect data
    total_counts = defaultdict(lambda: {
        "plain_dns": {"down": 0, "total": 0},
        "doh": {"down": 0, "total": 0},
        "dot": {"down": 0, "total": 0}
    })

    for entry in history:
        date = datetime.strptime(entry["date"], "%Y-%m-%d %H:%M:%S")
        for node in entry["nodes"]:
            ip = node["ip"]
            # Update node details if not already present
            if nodes_status[ip]["name"] == "":
                nodes_status[ip]["name"] = node.get("name", "")
                nodes_status[ip]["location"] = node.get("location", "")

            # Update counts and last downtime
            for key in ["plain_dns", "doh", "dot"]:
                status = node.get(key, "up")
                if status == "down":
                    total_counts[ip][key]["down"] += 1
                total_counts[ip][key]["total"] += 1

            # Update last downtime for each key
            for key in ["plain_dns", "doh", "dot"]:
                if node.get(key) == "down":
                    nodes_status[ip][key]["last_down"] = date.strftime("%Y-%m-%d %H:%M:%S")

    # Calculate percentages and prepare final summary
    node_list = []
    for ip, status in nodes_status.items():
        node_data = status.copy()
        for key in ["plain_dns", "doh", "dot"]:
            total = total_counts[ip][key]["total"]
            down = total_counts[ip][key]["down"]
            if total > 0:
                node_data[key]["percentage"] = ((total - down) / total) * 100
            else:
                node_data[key]["percentage"] = 100
        node_list.append(node_data)

    # Aggregate overall status
    overall_counts = {"plain_dns": {"down": 0, "total": 0}, "doh": {"down": 0, "total": 0}, "dot": {"down": 0, "total": 0}}
    for ip, counts in total_counts.items():
        for key in ["plain_dns", "doh", "dot"]:
            overall_counts[key]["total"] += counts[key]["total"]
            overall_counts[key]["down"] += counts[key]["down"]

    for key in ["plain_dns", "doh", "dot"]:
        total = overall_counts[key]["total"]
        down = overall_counts[key]["down"]
        if total > 0:
            overall_status[key]["percentage"] = ((total - down) / total) * 100
            last_downs = [nodes_status[ip][key]["last_down"] for ip in nodes_status if nodes_status[ip][key]["last_down"] != "Never"]
            if last_downs:
                overall_status[key]["last_down"] = max(last_downs)
        else:
            overall_status[key]["percentage"] = 100

    return {
        "nodes": node_list,
        "overall": overall_status,
        "check_counts": total_counts
    }
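
Note: the uptime figures above use ((total - down) / total) * 100 per protocol, defaulting to 100 when there were no checks in the window. A quick worked check with illustrative numbers:

total, down = 60, 2                      # 60 checks in the window, 2 failures
print(((total - down) / total) * 100)    # 96.666..., i.e. roughly 96.67% uptime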
def convert_nodes_to_dict(nodes):
    nodes_dict = {}
    for node in nodes:
        name = node.get('name')
        if name:
            nodes_dict[name] = node
    return nodes_dict

# endregion


# region API routes
@app.route("/api/nodes")
def api_nodes():
    node_status = check_nodes()
    return jsonify(node_status)

@app.route("/api/history")
def api_history():
    history_days = 7
    if "days" in request.args:
        try:
            history_days = int(request.args["days"])
        except:
            pass
    history = get_history(history_days)
    history_summary = summarize_history(history)
    return jsonify(history_summary)

# endregion

@@ -284,48 +450,8 @@ def log_status(node_status: list):
# region Main routes
@app.route("/")
def index():
    global nodes

    if last_log > datetime.now() - relativedelta.relativedelta(minutes=1):
        # Load the last log
        with open(f"{log_dir}/node_status.json", "r") as file:
            data = json.load(file)
        newest = {"date": datetime.now()-relativedelta.relativedelta(years=1), "nodes": []}
        for entry in data:
            if datetime.strptime(entry["date"], "%Y-%m-%d %H:%M:%S") > newest["date"]:
                newest = entry
                newest["date"] = datetime.strptime(newest["date"], "%Y-%m-%d %H:%M:%S")
        node_status = newest["nodes"]
    else:
        if len(nodes) == 0:
            nodes = get_node_list()
            node_status = []
            for ip in nodes:
                node_status.append({
                    "ip": ip,
                    "name": node_names[ip] if ip in node_names else ip,
                    "location": node_locations[ip] if ip in node_locations else "Unknown",
                    "plain_dns": check_plain_dns(ip),
                    "doh": check_doh(ip),
                    "dot": check_dot(ip),
                    "cert": verify_cert(ip,443),
                    "cert_853": verify_cert(ip,853)
                })
        else:
            node_status = []
            for ip in nodes:
                node_status.append({
                    "ip": ip,
                    "name": node_names[ip] if ip in node_names else ip,
                    "location": node_locations[ip] if ip in node_locations else "Unknown",
                    "plain_dns": check_plain_dns(ip),
                    "doh": check_doh(ip),
                    "dot": check_dot(ip),
                    "cert": verify_cert(ip,443),
                    "cert_853": verify_cert(ip,853)
                })
        # Save the node status to a file
        log_status(node_status)
    node_status = check_nodes()


    warnings = []
    for node in node_status:
@@ -346,7 +472,18 @@ def index():
            if cert_853_expiry < datetime.now() + relativedelta.relativedelta(days=7):
                warnings.append(f"{node['name']} has a certificate expiring in less than 7 days on port 853")

    return render_template("index.html",nodes=node_status,warnings=warnings)
    history_days = 7
    if "history" in request.args:
        try:
            history_days = int(request.args["history"])
        except:
            pass
    history = get_history(history_days)
    history_summary = summarize_history(history)
    history_summary["nodes"] = convert_nodes_to_dict(history_summary["nodes"])
    last_check = last_log.strftime("%Y-%m-%d %H:%M:%S")
    print(f"Last check: {last_check}", flush=True)
    return render_template("index.html",nodes=node_status,warnings=warnings,history=history_summary,last_check=last_check)


@app.route("/<path:path>")

 templates/assets/css/index.css | 18 (new file)
@@ -0,0 +1,18 @@
.node {
    margin: 25px;
    border: solid;
    border-radius: 20px;
    padding-inline: 50px;
    max-width: 90%;
}

.spacer {
    margin: 25px;
    display: block;
}
.warnings {
    margin: auto;
    width: 1000px;
    max-width: 95%;
    margin-top: 20px;
}

@@ -33,6 +33,7 @@
    <link rel="stylesheet" href="assets/css/bs-theme-overrides.css">
    <link rel="stylesheet" href="assets/css/Navbar-Right-Links-Dark-icons.css">
    <link rel="stylesheet" href="assets/css/Team-images.css">
    <link rel="stylesheet" href="assets/css/index.css">

    <script async src="https://umami.woodburn.au/script.js"
        data-website-id="7e0ed7e4-3858-4124-a574-b57ac05aaad1"></script>
@@ -49,7 +50,7 @@
    <section id="intro">
        <div class="text-center">
            <h1 class="text-center" style="font-size: 60px;">HNS DoH</h1>
            <div id="warnings" style="margin-top: 20px;">
            <div class="warnings">
                <!-- Check if warnings is empty -->
                {% if warnings %}
                <div class="alert alert-danger" role="alert">
@@ -65,10 +66,24 @@
    </section>

    <section id="status"></section>
    <div class="text-center">
    <div class="text-center" style="width: fit-content;margin: auto;max-width: 100%;">
        <h1 class="text-center" style="font-size: 60px;">Nodes</h1>
        {% for node in nodes %}
        <span style="font-size: smaller;margin-bottom: 10px;">Last check: {{last_check}}</span>
        <div class="spacer"></div>
        <div class="node" style="display: block;">
            <div>
                <h2 style="display: inline;">Overall</h2>
            </div>
            <div>
                <h5>Stats</h5>
                <p>Plain DNS: {{history.overall.plain_dns.percentage}}% uptime (last down {{history.overall.plain_dns.last_down}})</p>
                <p>DNS over HTTPS: {{history.overall.doh.percentage}}% uptime (last down {{history.overall.doh.last_down}})</p>
                <p>DNS over TLS: {{history.overall.dot.percentage}}% uptime (last down {{history.overall.dot.last_down}})</p>
            </div>
        </div>
        <div class="spacer"></div>
        {% for node in nodes %}
        <div class="node">
            <div>
                <h2 style="display: inline;">{{node.location}}</h2>
                <h4 style="display: inline;">({{ node.name }})</h4>
@@ -79,7 +94,13 @@
                <p>DNS over HTTPS: {{node.doh}}</p>
                <p>DNS over TLS: {{node.dot}}</p>
                <p>Certificate: {{node.cert.valid}} (expires {{node.cert.expires}})</p>
            </div>
        </div>
            <div>
                <h5>Stats</h5>
                <p>Plain DNS: {{history.nodes[node.name].plain_dns.percentage}}% uptime (last down {{history.nodes[node.name].plain_dns.last_down}})</p>
                <p>DNS over HTTPS: {{history.nodes[node.name].doh.percentage}}% uptime (last down {{history.nodes[node.name].doh.last_down}})</p>
                <p>DNS over TLS: {{history.nodes[node.name].dot.percentage}}% uptime (last down {{history.nodes[node.name].dot.last_down}})</p>
            </div>
        </div>

        {% endfor %}