Some checks are pending
NordaBiz Tests / E2E Tests (Playwright) (push) Blocked by required conditions
NordaBiz Tests / Smoke Tests (Production) (push) Blocked by required conditions
NordaBiz Tests / Unit & Integration Tests (push) Waiting to run
NordaBiz Tests / Send Failure Notification (push) Blocked by required conditions
External monitoring via UptimeRobot (free tier) with internal health logger to differentiate ISP outages from server issues. Includes: - 4 new DB models (UptimeMonitor, UptimeCheck, UptimeIncident, InternalHealthLog) - Migration 082 with tables, indexes, and permissions - Internal health logger script (cron */5 min) - UptimeRobot sync script (cron hourly) with automatic cause correlation - Admin dashboard /admin/uptime with uptime %, response time charts, incident log with editable notes/causes, pattern analysis, monthly report - SLA comparison table (99.9%/99.5%/99%) Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1014 lines
41 KiB
Python
1014 lines
41 KiB
Python
"""
|
|
Admin Status Routes
|
|
===================
|
|
|
|
System status, health check, and debug panel routes.
|
|
"""
|
|
|
|
import json
|
|
import logging
|
|
import subprocess
|
|
import platform
|
|
from datetime import datetime, timedelta
|
|
|
|
from flask import render_template, request, redirect, url_for, flash, jsonify, Response
|
|
from flask_login import login_required, current_user
|
|
from sqlalchemy import func, text
|
|
|
|
from . import bp
|
|
from database import (
|
|
SessionLocal, Company, User, AuditLog, SecurityAlert,
|
|
CompanySocialMedia, CompanyWebsiteAnalysis, SystemRole,
|
|
UptimeMonitor, UptimeCheck, UptimeIncident, InternalHealthLog
|
|
)
|
|
from utils.decorators import role_required
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
# ============================================================
|
|
# SYSTEM STATUS DASHBOARD
|
|
# ============================================================
|
|
|
|
@bp.route('/status')
@login_required
@role_required(SystemRole.OFFICE_MANAGER)
def admin_status():
    """System status dashboard with real-time metrics.

    Gathers best-effort metrics from many sources (OS probes, SSL cert,
    git checkout, PostgreSQL, the app itself, security tables, gunicorn,
    external APIs, LAN servers) and renders the status dashboard.
    Every probe is isolated in its own try/except so one failing source
    never breaks the whole page; missing values render as None.
    """
    import re  # used for vm_stat and PostgreSQL version parsing below

    db = SessionLocal()
    try:
        # Current timestamp
        now = datetime.now()

        # ===== SYSTEM METRICS =====
        system_metrics = {
            'hostname': platform.node(),
            'os': f"{platform.system()} {platform.release()}",
            'python': platform.python_version(),
            # Pre-initialize every probe key to None so the template never
            # hits a missing key when a probe runs without raising but its
            # parse condition is never met (e.g. returncode != 0).
            'cpu_percent': None, 'cpu_idle': None,
            'ram_total_gb': None, 'ram_used_gb': None, 'ram_percent': None,
            'disk_total': None, 'disk_used': None, 'disk_percent': None,
            'uptime': None, 'load_1': None, 'load_5': None, 'load_15': None,
        }

        # CPU usage (macOS `top`; raises on Linux where -l is invalid)
        try:
            result = subprocess.run(['top', '-l', '1', '-n', '0'], capture_output=True, text=True, timeout=5)
            for line in result.stdout.split('\n'):
                if 'CPU usage' in line:
                    # Parse: "CPU usage: 5.88% user, 8.82% sys, 85.29% idle"
                    parts = line.split(':')[1].strip().split(',')
                    user = float(parts[0].replace('% user', '').strip())
                    sys_cpu = float(parts[1].replace('% sys', '').strip())
                    idle = float(parts[2].replace('% idle', '').strip())
                    system_metrics['cpu_percent'] = round(user + sys_cpu, 1)
                    system_metrics['cpu_idle'] = round(idle, 1)
                    break
        except Exception:
            # Linux fallback: aggregate "cpu " line of /proc/stat
            try:
                result = subprocess.run(['grep', 'cpu ', '/proc/stat'], capture_output=True, text=True, timeout=5)
                if result.returncode == 0:
                    parts = result.stdout.split()
                    idle = int(parts[4])
                    total = sum(int(x) for x in parts[1:])
                    system_metrics['cpu_percent'] = round(100 * (1 - idle / total), 1)
                    system_metrics['cpu_idle'] = round(100 * idle / total, 1)
            except Exception:
                pass  # keys already default to None

        # RAM usage
        try:
            # macOS: vm_stat reports page counts
            result = subprocess.run(['vm_stat'], capture_output=True, text=True, timeout=5)
            if result.returncode == 0 and 'Pages' in result.stdout:
                lines = result.stdout.strip().split('\n')
                # Page size is printed in the header line
                # ("... page size of 16384 bytes"); fall back to 16 KiB
                # (Apple Silicon default) if it cannot be parsed.
                m = re.search(r'page size of (\d+) bytes', lines[0])
                page_size = int(m.group(1)) if m else 16384
                stats = {}
                for line in lines[1:]:
                    if ':' in line:
                        key, val = line.split(':')
                        stats[key.strip()] = int(val.strip().rstrip('.'))
                free = stats.get('Pages free', 0) * page_size
                active = stats.get('Pages active', 0) * page_size
                inactive = stats.get('Pages inactive', 0) * page_size
                wired = stats.get('Pages wired down', 0) * page_size
                total_used = active + inactive + wired
                total_mem = total_used + free
                if total_mem > 0:
                    system_metrics['ram_total_gb'] = round(total_mem / (1024**3), 1)
                    system_metrics['ram_used_gb'] = round(total_used / (1024**3), 1)
                    system_metrics['ram_percent'] = round(100 * total_used / total_mem, 1)
            else:
                raise Exception("Not macOS")
        except Exception:
            # Linux fallback: `free -b`
            try:
                result = subprocess.run(['free', '-b'], capture_output=True, text=True, timeout=5)
                if result.returncode == 0:
                    lines = result.stdout.strip().split('\n')
                    mem_line = lines[1].split()
                    total = int(mem_line[1])
                    used = int(mem_line[2])
                    system_metrics['ram_total_gb'] = round(total / (1024**3), 1)
                    system_metrics['ram_used_gb'] = round(used / (1024**3), 1)
                    system_metrics['ram_percent'] = round(100 * used / total, 1)
            except Exception:
                pass  # keys already default to None

        # Disk usage of root filesystem
        try:
            result = subprocess.run(['df', '-h', '/'], capture_output=True, text=True, timeout=5)
            if result.returncode == 0:
                lines = result.stdout.strip().split('\n')
                parts = lines[1].split()
                system_metrics['disk_total'] = parts[1]
                system_metrics['disk_used'] = parts[2]
                system_metrics['disk_percent'] = int(parts[4].replace('%', ''))
        except Exception:
            pass  # keys already default to None

        # System uptime + load average — a single `uptime` invocation
        # (previously spawned twice for the two values)
        try:
            result = subprocess.run(['/usr/bin/uptime'], capture_output=True, text=True, timeout=5)
            if result.returncode == 0:
                out = result.stdout
                try:
                    system_metrics['uptime'] = out.strip().split('up')[1].split(',')[0].strip()
                except Exception:
                    pass
                try:
                    # Parse: "load average: 0.52, 0.58, 0.59"
                    load_part = out.split('load average:')[1].strip()
                    loads = [float(x.strip()) for x in load_part.split(',')]
                    system_metrics['load_1'] = loads[0]
                    system_metrics['load_5'] = loads[1]
                    system_metrics['load_15'] = loads[2]
                except Exception:
                    pass  # all three load keys stay None together
        except Exception:
            pass

        # ===== SSL CERTIFICATE =====
        ssl_metrics = {}
        try:
            import ssl
            import socket
            context = ssl.create_default_context()
            with socket.create_connection(('nordabiznes.pl', 443), timeout=5) as sock:
                with context.wrap_socket(sock, server_hostname='nordabiznes.pl') as ssock:
                    cert = ssock.getpeercert()
                    # notAfter format e.g. "Jun  1 12:00:00 2026 GMT"
                    not_after = datetime.strptime(cert['notAfter'], '%b %d %H:%M:%S %Y %Z')
                    ssl_metrics['domain'] = 'nordabiznes.pl'
                    ssl_metrics['expires'] = not_after.strftime('%Y-%m-%d')
                    ssl_metrics['days_left'] = (not_after - datetime.now()).days
                    ssl_metrics['issuer'] = dict(x[0] for x in cert['issuer']).get('organizationName', 'Unknown')
                    ssl_metrics['status'] = 'ok' if ssl_metrics['days_left'] > 14 else 'warning' if ssl_metrics['days_left'] > 0 else 'expired'
        except Exception as e:
            ssl_metrics['status'] = 'error'
            ssl_metrics['error'] = str(e)[:50]

        # ===== GIT/DEPLOY INFO =====
        deploy_metrics = {}
        try:
            # Current commit
            result = subprocess.run(['/usr/bin/git', 'rev-parse', '--short', 'HEAD'],
                                    capture_output=True, text=True, timeout=5, cwd='/var/www/nordabiznes')
            if result.returncode == 0:
                deploy_metrics['commit'] = result.stdout.strip()

            # Commit date
            result = subprocess.run(['/usr/bin/git', 'log', '-1', '--format=%ci'],
                                    capture_output=True, text=True, timeout=5, cwd='/var/www/nordabiznes')
            if result.returncode == 0:
                deploy_metrics['commit_date'] = result.stdout.strip()[:16]

            # Commit message (first line)
            result = subprocess.run(['/usr/bin/git', 'log', '-1', '--format=%s'],
                                    capture_output=True, text=True, timeout=5, cwd='/var/www/nordabiznes')
            if result.returncode == 0:
                deploy_metrics['commit_message'] = result.stdout.strip()[:60]

            # Branch
            result = subprocess.run(['/usr/bin/git', 'branch', '--show-current'],
                                    capture_output=True, text=True, timeout=5, cwd='/var/www/nordabiznes')
            if result.returncode == 0:
                deploy_metrics['branch'] = result.stdout.strip()
        except Exception as e:
            deploy_metrics['error'] = str(e)[:50]

        # ===== DATABASE METRICS =====
        db_metrics = {}

        try:
            # PostgreSQL version: "PostgreSQL 16.11 ..." -> "16.11"
            version_result = db.execute(text("SELECT version()")).scalar()
            if version_result:
                match = re.search(r'PostgreSQL (\d+\.\d+)', version_result)
                db_metrics['version'] = match.group(1) if match else version_result.split()[1]

            # Database size
            result = db.execute(text("SELECT pg_database_size(current_database())")).scalar()
            db_metrics['size_mb'] = round(result / (1024 * 1024), 2)

            # Active connections
            result = db.execute(text("SELECT count(*) FROM pg_stat_activity WHERE state = 'active'")).scalar()
            db_metrics['active_connections'] = result

            # Total connections
            result = db.execute(text("SELECT count(*) FROM pg_stat_activity")).scalar()
            db_metrics['total_connections'] = result

            # Table counts
            db_metrics['companies'] = db.query(Company).count()
            db_metrics['users'] = db.query(User).count()

            # Optional model — skip silently if it does not exist
            try:
                from database import AIChatMessage
                db_metrics['chat_messages'] = db.query(AIChatMessage).count()
            except Exception:
                pass

            try:
                db_metrics['social_media'] = db.query(CompanySocialMedia).count()
                db_metrics['seo_audits'] = db.query(CompanyWebsiteAnalysis).filter(
                    CompanyWebsiteAnalysis.seo_audited_at.isnot(None)
                ).count()
            except Exception:
                db.rollback()  # reset aborted transaction so later queries work

            # Cache hit ratio
            try:
                result = db.execute(text("""
                    SELECT
                        sum(heap_blks_hit) as hits,
                        sum(heap_blks_read) as reads
                    FROM pg_statio_user_tables
                """)).fetchone()
                if result and result[0] and (result[0] + result[1]) > 0:
                    db_metrics['cache_hit_ratio'] = round(100 * result[0] / (result[0] + result[1]), 1)
            except Exception:
                db.rollback()  # reset transaction after error

            # Slow queries (mean > 1s) — pg_stat_statements extension
            # may not be installed, in which case this reports 'N/A'
            try:
                result = db.execute(text("""
                    SELECT count(*) FROM pg_stat_statements
                    WHERE mean_exec_time > 1000
                """)).scalar()
                db_metrics['slow_queries'] = result or 0
            except Exception:
                db.rollback()  # reset transaction after error
                db_metrics['slow_queries'] = 'N/A'

            # Deadlocks since stats reset
            try:
                result = db.execute(text("""
                    SELECT deadlocks FROM pg_stat_database
                    WHERE datname = current_database()
                """)).scalar()
                db_metrics['deadlocks'] = result or 0
            except Exception:
                db.rollback()  # reset transaction after error

            db_metrics['status'] = 'ok'
        except Exception as e:
            db.rollback()  # reset transaction after error
            db_metrics['status'] = 'error'
            db_metrics['error'] = str(e)[:100]

        # ===== APPLICATION METRICS =====
        app_metrics = {}

        # Health check — smoke-test key endpoints via the internal test client
        from flask import current_app
        try:
            with current_app.test_client() as client:
                test_endpoints = ['/', '/login', '/api/companies', '/health', '/search?q=test']
                # Derive the total from the list (was hard-coded 5, which
                # silently drifts when endpoints are added or removed)
                endpoints_total = len(test_endpoints)
                endpoints_ok = 0
                for ep in test_endpoints:
                    try:
                        response = client.get(ep, follow_redirects=False)
                        if response.status_code in (200, 302, 304):
                            endpoints_ok += 1
                    except Exception:
                        pass
                app_metrics['endpoints_ok'] = endpoints_ok
                app_metrics['endpoints_total'] = endpoints_total
                app_metrics['endpoints_percent'] = round(100 * endpoints_ok / endpoints_total, 0)
        except Exception:
            app_metrics['endpoints_ok'] = None

        # Users statistics
        try:
            app_metrics['admins'] = db.query(User).filter(User.role == 'ADMIN').count()
            app_metrics['users_with_2fa'] = db.query(User).filter(User.totp_enabled == True).count()
        except Exception:
            db.rollback()
            app_metrics['admins'] = 0
            app_metrics['users_with_2fa'] = 0

        # Recent activity (last 24h)
        yesterday = now - timedelta(days=1)
        try:
            app_metrics['logins_24h'] = db.query(AuditLog).filter(
                AuditLog.action == 'login',
                AuditLog.created_at >= yesterday
            ).count()
        except Exception:
            db.rollback()
            app_metrics['logins_24h'] = 0

        # Security alerts (last 24h)
        try:
            app_metrics['alerts_24h'] = db.query(SecurityAlert).filter(
                SecurityAlert.created_at >= yesterday
            ).count()
        except Exception:
            db.rollback()
            app_metrics['alerts_24h'] = 0

        # ===== SECURITY METRICS =====
        security_metrics = {}

        # Failed logins (24h)
        try:
            security_metrics['failed_logins_24h'] = db.query(AuditLog).filter(
                AuditLog.action == 'login_failed',
                AuditLog.created_at >= yesterday
            ).count()
        except Exception:
            db.rollback()
            security_metrics['failed_logins_24h'] = 0

        # Blocked by GeoIP (24h)
        try:
            security_metrics['geoip_blocked_24h'] = db.query(SecurityAlert).filter(
                SecurityAlert.alert_type == 'geoip_blocked',
                SecurityAlert.created_at >= yesterday
            ).count()
        except Exception:
            db.rollback()
            security_metrics['geoip_blocked_24h'] = 0

        # Rate limit hits (24h)
        try:
            security_metrics['rate_limit_24h'] = db.query(SecurityAlert).filter(
                SecurityAlert.alert_type == 'rate_limit',
                SecurityAlert.created_at >= yesterday
            ).count()
        except Exception:
            db.rollback()
            security_metrics['rate_limit_24h'] = 0

        # Brute force blocked accounts
        try:
            security_metrics['locked_accounts'] = db.query(User).filter(
                User.is_locked == True
            ).count()
        except Exception:
            db.rollback()
            security_metrics['locked_accounts'] = 0

        # Unique IPs blocked (24h)
        try:
            result = db.query(func.count(func.distinct(SecurityAlert.ip_address))).filter(
                SecurityAlert.created_at >= yesterday
            ).scalar()
            security_metrics['unique_ips_blocked'] = result or 0
        except Exception:
            db.rollback()
            security_metrics['unique_ips_blocked'] = 0

        # ===== GUNICORN/PROCESS METRICS =====
        process_metrics = {}
        try:
            result = subprocess.run(['/usr/bin/pgrep', '-f', 'gunicorn'], capture_output=True, text=True, timeout=5)
            logger.info(f"pgrep gunicorn: returncode={result.returncode}, stdout={result.stdout.strip()}")
            if result.returncode == 0:
                pids = result.stdout.strip().split('\n')
                process_metrics['gunicorn_workers'] = len(pids) - 1  # -1 for master
                process_metrics['gunicorn_status'] = 'running'
            else:
                process_metrics['gunicorn_status'] = 'not found'
        except Exception as e:
            logger.error(f"pgrep gunicorn exception: {e}")
            process_metrics['gunicorn_status'] = 'unknown'

        # ===== EXTERNAL APIS STATUS =====
        external_apis = []
        import urllib.request
        import time as time_module

        # A HEAD request is enough to confirm reachability and measure
        # latency — one loop replaces three near-identical probe blocks.
        api_targets = [
            ('Google Gemini', 'https://generativelanguage.googleapis.com/'),
            ('Brave Search', 'https://api.search.brave.com/'),
            ('PageSpeed API', 'https://pagespeedonline.googleapis.com/'),
        ]
        for api_name, api_url in api_targets:
            try:
                start = time_module.time()
                req = urllib.request.Request(api_url, method='HEAD')
                req.add_header('User-Agent', 'NordaBiz-HealthCheck/1.0')
                urllib.request.urlopen(req, timeout=5)
                latency = round((time_module.time() - start) * 1000)
                external_apis.append({'name': api_name, 'status': 'ok', 'latency': latency})
            except Exception:
                external_apis.append({'name': api_name, 'status': 'error', 'latency': None})

        # ===== SERVERS PING =====
        servers_status = []
        servers_to_ping = [
            ('NORDABIZ-01', '10.22.68.249'),
            ('R11-REVPROXY-01', '10.22.68.250'),
            ('R11-DNS-01', '10.22.68.171'),
            ('R11-GIT-INPI', '10.22.68.180'),
        ]

        for name, ip in servers_to_ping:
            try:
                start = time_module.time()
                result = subprocess.run(['/usr/bin/ping', '-c', '1', '-W', '2', ip],
                                        capture_output=True, text=True, timeout=5)
                latency = round((time_module.time() - start) * 1000)
                if result.returncode == 0:
                    servers_status.append({'name': name, 'ip': ip, 'status': 'online', 'latency': latency})
                else:
                    servers_status.append({'name': name, 'ip': ip, 'status': 'offline', 'latency': None})
            except Exception:
                servers_status.append({'name': name, 'ip': ip, 'status': 'unknown', 'latency': None})

        # ===== TECHNOLOGY STACK =====
        import flask
        import sqlalchemy
        # Technology stack - ONLY VERIFIED VERSIONS (checked via SSH 2026-01-14)
        # Dynamic versions are fetched at runtime, static ones were verified manually
        technology_stack = {
            'programming': [
                {'name': 'Python', 'version': platform.python_version(), 'icon': '🐍', 'category': 'Backend'},
                {'name': 'Flask', 'version': flask.__version__, 'icon': '🌶️', 'category': 'Web Framework'},
                {'name': 'SQLAlchemy', 'version': sqlalchemy.__version__, 'icon': '🗃️', 'category': 'ORM'},
                {'name': 'Jinja2', 'version': '3.1.6', 'icon': '📄', 'category': 'Templating'},
                {'name': 'Werkzeug', 'version': '3.1.3', 'icon': '🔧', 'category': 'WSGI Toolkit'},
            ],
            'databases': [
                {'name': 'PostgreSQL', 'version': db_metrics.get('version', 'N/A'), 'icon': '🐘', 'category': 'Primary DB'},
            ],
            'ai': [
                {'name': 'Google Gemini', 'version': '3 Flash', 'icon': '🤖', 'category': 'AI Chat'},
                {'name': 'Brave Search API', 'version': 'v1', 'icon': '🔍', 'category': 'News Search'},
                {'name': 'Google PageSpeed', 'version': 'v5', 'icon': '⚡', 'category': 'SEO Audit'},
            ],
            'infrastructure': [
                {'name': 'Proxmox VE', 'version': '9.1.1', 'icon': '🖥️', 'category': 'Wirtualizacja'},
                {'name': 'Ubuntu Server', 'version': '24.04.3 LTS', 'icon': '🐧', 'category': 'System OS'},
                {'name': 'Nginx', 'version': '1.24.0', 'icon': '🔧', 'category': 'Web Server'},
            ],
            'network': [
                {'name': 'Fortigate 500D', 'version': None, 'icon': '🛡️', 'category': 'Firewall/VPN'},
                {'name': 'Nginx Proxy Manager', 'version': '2.12.6', 'icon': '🔀', 'category': 'Reverse Proxy'},
                {'name': 'Docker', 'version': '28.2.2', 'icon': '🐳', 'category': 'Containers'},
                {'name': "Let's Encrypt", 'version': 'ACME v2', 'icon': '🔒', 'category': 'SSL/TLS'},
            ],
            'security': [
                {'name': 'Flask-Login', 'version': '0.6.3', 'icon': '🔐', 'category': 'Autentykacja'},
                {'name': 'Flask-WTF', 'version': '1.2.2', 'icon': '🛡️', 'category': 'CSRF Protection'},
                {'name': 'Flask-Limiter', 'version': '4.0.0', 'icon': '⏱️', 'category': 'Rate Limiting'},
                {'name': 'geoip2', 'version': '5.2.0', 'icon': '🌍', 'category': 'GeoIP Blocking'},
                {'name': 'PyOTP', 'version': '2.9.0', 'icon': '📱', 'category': '2FA/TOTP'},
            ],
            'devops': [
                {'name': 'Git', 'version': '2.43.0', 'icon': '📦', 'category': 'Version Control'},
                {'name': 'Gitea', 'version': '1.22.6', 'icon': '🍵', 'category': 'Git Server'},
                {'name': 'systemd', 'version': '255', 'icon': '⚙️', 'category': 'Service Manager'},
            ],
            'servers': [
                {'name': 'NORDABIZ-01', 'ip': '10.22.68.249', 'icon': '🖥️', 'role': 'App Server (VM 249)'},
                {'name': 'R11-REVPROXY-01', 'ip': '10.22.68.250', 'icon': '🔀', 'role': 'Reverse Proxy (VM 119)'},
                {'name': 'R11-DNS-01', 'ip': '10.22.68.171', 'icon': '📡', 'role': 'DNS Server (VM 122)'},
                {'name': 'R11-GIT-INPI', 'ip': '10.22.68.180', 'icon': '📦', 'role': 'Git Server (VM 180)'},
            ],
        }

        return render_template(
            'admin/status_dashboard.html',
            system_metrics=system_metrics,
            db_metrics=db_metrics,
            app_metrics=app_metrics,
            process_metrics=process_metrics,
            technology_stack=technology_stack,
            ssl_metrics=ssl_metrics,
            deploy_metrics=deploy_metrics,
            security_metrics=security_metrics,
            external_apis=external_apis,
            servers_status=servers_status,
            generated_at=now
        )
    finally:
        db.close()
|
|
|
|
|
|
@bp.route('/api/status')
@login_required
@role_required(SystemRole.OFFICE_MANAGER)
def api_admin_status():
    """API endpoint for status dashboard auto-refresh.

    Returns a compact JSON payload (system, database, app sections) for
    the dashboard's polling JavaScript. System probes are Linux-only
    here; each section degrades to None/0/'error' instead of failing
    the whole response.
    """
    db = SessionLocal()
    try:
        now = datetime.now()
        data = {'timestamp': now.isoformat()}

        # System metrics
        system = {}
        try:
            # CPU (Linux): aggregate "cpu " line of /proc/stat
            result = subprocess.run(['grep', 'cpu ', '/proc/stat'], capture_output=True, text=True, timeout=2)
            if result.returncode == 0:
                parts = result.stdout.split()
                idle = int(parts[4])
                total = sum(int(x) for x in parts[1:])
                system['cpu_percent'] = round(100 * (1 - idle / total), 1)
        except Exception:
            system['cpu_percent'] = None

        try:
            # RAM (Linux): `free -b` second line is total/used/...
            result = subprocess.run(['free', '-b'], capture_output=True, text=True, timeout=2)
            if result.returncode == 0:
                lines = result.stdout.strip().split('\n')
                mem_line = lines[1].split()
                total = int(mem_line[1])
                used = int(mem_line[2])
                system['ram_percent'] = round(100 * used / total, 1)
        except Exception:
            system['ram_percent'] = None

        try:
            # Disk usage of root filesystem
            result = subprocess.run(['df', '-h', '/'], capture_output=True, text=True, timeout=2)
            if result.returncode == 0:
                lines = result.stdout.strip().split('\n')
                parts = lines[1].split()
                system['disk_percent'] = int(parts[4].replace('%', ''))
        except Exception:
            system['disk_percent'] = None

        data['system'] = system

        # Database metrics
        db_data = {}
        try:
            db_data['active_connections'] = db.execute(text("SELECT count(*) FROM pg_stat_activity WHERE state = 'active'")).scalar()
            db_data['status'] = 'ok'
        except Exception as e:
            # Roll back so the session leaves the aborted-transaction state;
            # without this the SecurityAlert query below would also fail.
            db.rollback()
            db_data['status'] = 'error'
            db_data['error'] = str(e)[:50]

        data['database'] = db_data

        # App metrics — guarded so a DB error degrades to 0 instead of a 500
        yesterday = now - timedelta(days=1)
        try:
            alerts_24h = db.query(SecurityAlert).filter(SecurityAlert.created_at >= yesterday).count()
        except Exception:
            db.rollback()
            alerts_24h = 0
        data['app'] = {'alerts_24h': alerts_24h}

        return jsonify(data)
    finally:
        db.close()
|
|
|
|
|
|
# ============================================================
|
|
# HEALTH CHECK DASHBOARD
|
|
# ============================================================
|
|
|
|
@bp.route('/health')
@login_required
@role_required(SystemRole.OFFICE_MANAGER)
def admin_health():
    """
    Graphical health check dashboard.

    Issues an internal test-client GET against every critical endpoint
    (public pages, auth, API, admin panel, plus a few company profiles)
    and renders per-endpoint status with visual indicators and summary
    statistics.
    """
    from flask import current_app

    results = []
    categories = {
        'public': {'name': 'Strony publiczne', 'icon': '🌐', 'endpoints': []},
        'auth': {'name': 'Autentykacja', 'icon': '🔐', 'endpoints': []},
        'api': {'name': 'API', 'icon': '⚡', 'endpoints': []},
        'admin': {'name': 'Panel admina', 'icon': '👨💼', 'endpoints': []},
        'company': {'name': 'Profile firm', 'icon': '🏢', 'endpoints': []},
    }

    # Endpoints to check (path, name, category)
    endpoints = [
        ('/', 'Strona główna', 'public'),
        ('/release-notes', 'Historia zmian', 'public'),
        ('/search?q=test', 'Wyszukiwarka', 'public'),
        ('/chat', 'NordaGPT Chat', 'public'),
        ('/raporty/', 'Raporty', 'public'),
        ('/login', 'Logowanie', 'auth'),
        ('/register', 'Rejestracja', 'auth'),
        ('/api/companies', 'Lista firm', 'api'),
        ('/health', 'Health check', 'api'),
        ('/admin/security', 'Bezpieczeństwo', 'admin'),
        ('/admin/seo', 'SEO Audit', 'admin'),
        ('/admin/social-audit', 'Audyt social media', 'admin'),
        ('/admin/analytics', 'Analityka', 'admin'),
        ('/admin/forum', 'Forum', 'admin'),
        ('/admin/kalendarz', 'Kalendarz', 'admin'),
        ('/admin/status', 'Status systemu', 'admin'),
        ('/admin/fees', 'Składki (FIS)', 'admin'),
        ('/admin/zopk/news', 'ZOPK News', 'admin'),
        ('/admin/recommendations', 'Rekomendacje', 'admin'),
    ]

    # Add company profiles: INPI, Waterm (fixed) + 3 random
    db = SessionLocal()
    try:
        import random as rnd

        # Fixed companies to always check
        fixed_companies = db.query(Company).filter(
            Company.name.ilike('%INPI%') | Company.name.ilike('%Waterm%')
        ).all()

        for company in fixed_companies:
            endpoints.append((f'/company/{company.slug}', company.name[:30], 'company'))

        # 3 random companies (excluding fixed ones)
        # NOTE(review): loads all remaining rows to sample in Python —
        # fine at current table size, revisit if the table grows large.
        fixed_ids = [c.id for c in fixed_companies]
        all_other = db.query(Company).filter(~Company.id.in_(fixed_ids)).all()
        random_companies = rnd.sample(all_other, min(3, len(all_other)))

        for company in random_companies:
            endpoints.append((f'/company/{company.slug}', f'{company.name[:25]}...', 'company'))

    finally:
        db.close()

    # Test each endpoint
    with current_app.test_client() as client:
        for path, name, category in endpoints:
            start_time = datetime.now()
            try:
                response = client.get(path, follow_redirects=False)
                status_code = response.status_code
                response_time = (datetime.now() - start_time).total_seconds() * 1000  # ms

                # Determine status
                # 429 = rate limited (endpoint works, just protected)
                if status_code in (200, 302, 304, 429):
                    status = 'ok'
                elif status_code == 404:
                    status = 'not_found'
                elif status_code >= 500:
                    status = 'error'
                else:
                    # e.g. 401/403 — reachable but rejected; shown as warning
                    status = 'warning'

                result = {
                    'path': path,
                    'name': name,
                    'status_code': status_code,
                    'status': status,
                    'response_time': round(response_time, 1),
                    'error': None
                }

            except Exception as e:
                result = {
                    'path': path,
                    'name': name,
                    'status_code': 500,
                    'status': 'error',
                    'response_time': None,
                    'error': str(e)[:100]
                }

            categories[category]['endpoints'].append(result)
            results.append(result)

    # Summary stats
    total = len(results)
    ok_count = sum(1 for r in results if r['status'] == 'ok')
    warning_count = sum(1 for r in results if r['status'] == 'warning')
    error_count = sum(1 for r in results if r['status'] in ('error', 'not_found'))
    # Average only over checks that produced a timing — dividing by the
    # full count (as before) understated latency whenever a check errored
    # and had response_time = None.
    timed = [r['response_time'] for r in results if r['response_time']]
    avg_response_time = sum(timed) / len(timed) if timed else 0

    summary = {
        'total': total,
        'ok': ok_count,
        'warning': warning_count,
        'error': error_count,
        'health_percent': round(100 * ok_count / total, 1) if total else 0,
        'avg_response_time': round(avg_response_time, 1),
        'overall_status': 'ok' if error_count == 0 else ('degraded' if ok_count > error_count else 'critical')
    }

    return render_template(
        'admin/health_dashboard.html',
        categories=categories,
        summary=summary,
        generated_at=datetime.now()
    )
|
|
|
|
|
|
@bp.route('/api/health')
@login_required
@role_required(SystemRole.OFFICE_MANAGER)
def api_admin_health():
    """API endpoint for health dashboard auto-refresh"""
    from flask import current_app

    # Reduced endpoint set from admin_health, returned as JSON.
    checked_paths = [
        ('/', 'Strona główna'),
        ('/release-notes', 'Historia zmian'),
        ('/search?q=test', 'Wyszukiwarka'),
        ('/chat', 'NordaGPT Chat'),
        ('/login', 'Logowanie'),
        ('/api/companies', 'Lista firm'),
        ('/health', 'Health check'),
        ('/admin/security', 'Bezpieczeństwo'),
        ('/admin/status', 'Status systemu'),
        ('/admin/fees', 'Składki (FIS)'),
        ('/admin/zopk/news', 'ZOPK News'),
    ]

    results = []
    with current_app.test_client() as client:
        for path, label in checked_paths:
            try:
                code = client.get(path, follow_redirects=False).status_code
                # 429 = rate limited, endpoint works
                results.append({
                    'path': path,
                    'name': label,
                    'status': code,
                    'ok': code in (200, 302, 304, 429),
                })
            except Exception as exc:
                results.append({
                    'path': path,
                    'name': label,
                    'status': 500,
                    'ok': False,
                    'error': str(exc)[:50],
                })

    total = len(results)
    passing = sum(1 for entry in results if entry['ok'])

    return jsonify({
        'success': True,
        'timestamp': datetime.now().isoformat(),
        'results': results,
        'summary': {
            'total': total,
            'ok': passing,
            'failed': total - passing,
            'health_percent': round(100 * passing / total, 1)
        }
    })
|
|
|
|
|
|
# ============================================================
|
|
# UPTIME MONITORING
|
|
# ============================================================
|
|
|
|
def _get_uptime_data(db, days=30):
    """Collect all data for the uptime monitoring dashboard.

    Args:
        db: SQLAlchemy session.
        days: How many days of response-time history to include in the chart.

    Returns:
        Dict with keys: monitor, has_data, current_status, last_checked,
        last_response_time, uptime (per period), response_times,
        avg_response_time, incidents, patterns, monthly_report, sla_context.
        When no active monitor exists: {'monitors': [], 'has_data': False}.
    """
    now = datetime.now()
    data = {}

    # Active monitors — the dashboard shows only the first (primary) one.
    monitors = db.query(UptimeMonitor).filter_by(is_active=True).all()
    if not monitors:
        return {'monitors': [], 'has_data': False}

    monitor = monitors[0]  # primary monitor
    data['monitor'] = {
        'name': monitor.name,
        'url': monitor.url,
        'id': monitor.id
    }
    data['has_data'] = True

    # Most recent check → current status line on the dashboard.
    last_check = db.query(UptimeCheck).filter_by(
        monitor_id=monitor.id
    ).order_by(UptimeCheck.checked_at.desc()).first()

    if last_check:
        data['current_status'] = last_check.status
        data['last_checked'] = last_check.checked_at.strftime('%Y-%m-%d %H:%M')
        data['last_response_time'] = last_check.response_time_ms
    else:
        data['current_status'] = 'unknown'
        data['last_checked'] = None
        data['last_response_time'] = None

    # Uptime % over standard windows (24h / 7d / 30d / 90d).
    data['uptime'] = {}
    for period_name, period_days in [('24h', 1), ('7d', 7), ('30d', 30), ('90d', 90)]:
        cutoff = now - timedelta(days=period_days)
        total = db.query(UptimeCheck).filter(
            UptimeCheck.monitor_id == monitor.id,
            UptimeCheck.checked_at >= cutoff
        ).count()
        up = db.query(UptimeCheck).filter(
            UptimeCheck.monitor_id == monitor.id,
            UptimeCheck.checked_at >= cutoff,
            UptimeCheck.status == 'up'
        ).count()
        pct = round(100 * up / total, 3) if total > 0 else None
        data['uptime'][period_name] = {
            'percent': pct,
            'total_checks': total,
            'up_checks': up,
            'down_checks': total - up if total else 0
        }

    # Response-time series for the chart (last `days` days).
    cutoff = now - timedelta(days=days)
    response_times = db.query(
        UptimeCheck.checked_at,
        UptimeCheck.response_time_ms
    ).filter(
        UptimeCheck.monitor_id == monitor.id,
        UptimeCheck.checked_at >= cutoff,
        UptimeCheck.response_time_ms.isnot(None)
    ).order_by(UptimeCheck.checked_at).all()

    data['response_times'] = [
        {'time': rt.checked_at.strftime('%Y-%m-%d %H:%M'), 'ms': rt.response_time_ms}
        for rt in response_times
    ]

    # Mean response time over the same window.
    if response_times:
        avg_rt = sum(rt.response_time_ms for rt in response_times) / len(response_times)
        data['avg_response_time'] = round(avg_rt)
    else:
        data['avg_response_time'] = None

    # Most recent incidents (editable notes/causes in the UI).
    incidents = db.query(UptimeIncident).filter(
        UptimeIncident.monitor_id == monitor.id
    ).order_by(UptimeIncident.started_at.desc()).limit(50).all()

    data['incidents'] = [{
        'id': inc.id,
        'started_at': inc.started_at.strftime('%Y-%m-%d %H:%M'),
        'ended_at': inc.ended_at.strftime('%Y-%m-%d %H:%M') if inc.ended_at else None,
        'duration_seconds': inc.duration_seconds,
        'duration_human': _format_duration(inc.duration_seconds) if inc.duration_seconds else 'trwa...',
        'cause': inc.cause,
        'cause_label': {'isp': 'ISP (Chopin)', 'server': 'Serwer', 'infra': 'Infrastruktura', 'unknown': 'Nieznana'}.get(inc.cause, inc.cause),
        'notes': inc.notes or ''
    } for inc in incidents]

    # Pattern analysis — incident counts by hour, weekday and cause.
    all_incidents = db.query(UptimeIncident).filter(
        UptimeIncident.monitor_id == monitor.id
    ).all()

    hour_counts = [0] * 24
    dow_counts = [0] * 7  # 0=Mon .. 6=Sun
    cause_counts = {'isp': 0, 'server': 0, 'infra': 0, 'unknown': 0}

    for inc in all_incidents:
        hour_counts[inc.started_at.hour] += 1
        dow_counts[inc.started_at.weekday()] += 1
        # .get() tolerates cause values outside the predefined set
        cause_counts[inc.cause] = cause_counts.get(inc.cause, 0) + 1

    data['patterns'] = {
        'by_hour': hour_counts,
        'by_dow': dow_counts,
        'by_cause': cause_counts
    }

    # Monthly report (current month so far).
    month_start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    month_incidents = [i for i in all_incidents if i.started_at >= month_start]
    month_downtime = sum(i.duration_seconds or 0 for i in month_incidents)
    # BUGFIX: use the actual elapsed seconds since the start of the month as
    # the observation window. The previous whole-day arithmetic
    # ((now - month_start).days * 86400) undercounted the window by up to
    # 24h, skewing the uptime percentage early in each day/month.
    month_elapsed_seconds = max(int((now - month_start).total_seconds()), 1)
    # Clamp at 0: an incident that started last month but ended this month
    # carries its full duration and could otherwise push the value negative.
    month_uptime_pct = max(round(100 * (1 - month_downtime / month_elapsed_seconds), 3), 0.0)

    # Previous month, for the trend comparison.
    prev_month_end = month_start - timedelta(seconds=1)
    prev_month_start = prev_month_end.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    prev_month_incidents = [i for i in all_incidents if prev_month_start <= i.started_at < month_start]
    prev_month_downtime = sum(i.duration_seconds or 0 for i in prev_month_incidents)

    data['monthly_report'] = {
        'month': now.strftime('%B %Y'),
        'uptime_pct': month_uptime_pct,
        'total_downtime_seconds': month_downtime,
        'total_downtime_human': _format_duration(month_downtime),
        'incidents_count': len(month_incidents),
        'longest_incident': _format_duration(max((i.duration_seconds or 0 for i in month_incidents), default=0)),
        'prev_month': prev_month_end.strftime('%B %Y'),
        'prev_downtime_seconds': prev_month_downtime,
        'prev_downtime_human': _format_duration(prev_month_downtime),
        'prev_incidents_count': len(prev_month_incidents),
        'trend': 'better' if month_downtime < prev_month_downtime else ('worse' if month_downtime > prev_month_downtime else 'same')
    }

    # SLA reference table shown for context on the dashboard.
    data['sla_context'] = {
        '99.9': {'max_downtime_month': '43 min', 'max_downtime_year': '8h 46min'},
        '99.5': {'max_downtime_month': '3h 36min', 'max_downtime_year': '1d 19h'},
        '99.0': {'max_downtime_month': '7h 18min', 'max_downtime_year': '3d 15h'},
    }

    return data
|
|
|
|
|
|
def _format_duration(seconds):
|
|
"""Formatuj sekundy na czytelny tekst"""
|
|
if not seconds or seconds <= 0:
|
|
return '0s'
|
|
if seconds < 60:
|
|
return f'{seconds}s'
|
|
if seconds < 3600:
|
|
m = seconds // 60
|
|
s = seconds % 60
|
|
return f'{m}min {s}s' if s else f'{m}min'
|
|
h = seconds // 3600
|
|
m = (seconds % 3600) // 60
|
|
return f'{h}h {m}min' if m else f'{h}h'
|
|
|
|
|
|
@bp.route('/uptime')
@login_required
@role_required(SystemRole.OFFICE_MANAGER)
def admin_uptime():
    """Render the uptime monitoring dashboard (30-day window)."""
    session = SessionLocal()
    try:
        return render_template(
            'admin/uptime_dashboard.html',
            data=_get_uptime_data(session, days=30)
        )
    finally:
        session.close()
|
|
|
|
|
|
@bp.route('/api/uptime')
@login_required
@role_required(SystemRole.OFFICE_MANAGER)
def api_admin_uptime():
    """API endpoint for uptime-dashboard auto-refresh.

    Query params:
        days: Response-time history window, clamped to 1..90 (default 30).

    Returns:
        JSON payload from _get_uptime_data plus 'timestamp' and 'success'.
    """
    db = SessionLocal()
    try:
        days = request.args.get('days', 30, type=int)
        # Clamp to a sane window: zero/negative would put the cutoff in the
        # present/future (empty chart), and >90 days bloats the payload.
        data = _get_uptime_data(db, days=max(1, min(days, 90)))
        data['timestamp'] = datetime.now().isoformat()
        data['success'] = True
        return jsonify(data)
    finally:
        db.close()
|
|
|
|
|
|
@bp.route('/api/uptime/incident/<int:incident_id>/notes', methods=['POST'])
@login_required
@role_required(SystemRole.OFFICE_MANAGER)
def api_update_incident_notes(incident_id):
    """Update an incident's notes and/or cause from the dashboard editor.

    Accepts JSON body with optional 'notes' (free text) and 'cause'
    (one of: isp, server, infra, unknown). Unknown cause values are
    silently ignored.
    """
    db = SessionLocal()
    try:
        # Session.get() is the modern SQLAlchemy primary-key lookup;
        # Query.get() is legacy/deprecated since 1.4.
        incident = db.get(UptimeIncident, incident_id)
        if not incident:
            return jsonify({'success': False, 'error': 'Incident not found'}), 404

        data = request.get_json()
        if data and 'notes' in data:
            incident.notes = data['notes']
        # Only accept causes from the known set used by the dashboard.
        if data and 'cause' in data and data['cause'] in ('isp', 'server', 'infra', 'unknown'):
            incident.cause = data['cause']

        db.commit()
        return jsonify({'success': True})
    except Exception as e:
        db.rollback()
        # Log the full traceback; the response carries only the message.
        logger.exception("Failed to update incident %s", incident_id)
        return jsonify({'success': False, 'error': str(e)}), 500
    finally:
        db.close()
|