This commit is contained in:
Mateusz Gruszczyński
2026-05-08 11:52:01 +02:00
parent f445d25c5d
commit cd6c4fad85
9 changed files with 293 additions and 16 deletions

View File

@@ -167,6 +167,7 @@ def save_preferences(data: dict, user_id: int | None = None):
peers_refresh_seconds = data.get("peers_refresh_seconds")
port_check_enabled = data.get("port_check_enabled")
footer_items_json = data.get("footer_items_json")
title_speed_enabled = data.get("title_speed_enabled")
with connect() as conn:
now = utcnow()
if allowed_theme:
@@ -183,6 +184,9 @@ def save_preferences(data: dict, user_id: int | None = None):
conn.execute("UPDATE user_preferences SET peers_refresh_seconds=?, updated_at=? WHERE user_id=?", (sec, now, user_id))
if port_check_enabled is not None:
conn.execute("UPDATE user_preferences SET port_check_enabled=?, updated_at=? WHERE user_id=?", (1 if port_check_enabled else 0, now, user_id))
if title_speed_enabled is not None:
# Notatka: preferencja steruje wyświetlaniem bieżącego DL/UL w tytule karty przeglądarki.
conn.execute("UPDATE user_preferences SET title_speed_enabled=?, updated_at=? WHERE user_id=?", (1 if title_speed_enabled else 0, now, user_id))
if footer_items_json is not None:
# Note: Store only JSON objects so footer visibility can be extended without schema churn.
value = footer_items_json if isinstance(footer_items_json, str) else json.dumps(footer_items_json)

View File

@@ -0,0 +1,159 @@
from __future__ import annotations
import threading
from typing import Any
from ..db import connect, utcnow
from .rtorrent import human_rate
# Moment this process started; session peaks are measured from here onward.
_SESSION_STARTED_AT = utcnow()
# In-memory peak records, keyed by rTorrent profile id.
_CACHE: dict[int, dict[str, Any]] = {}
# Set once the all-time records have been loaded from SQLite (see load_cache).
_LOADED = False
# Guards _CACHE/_LOADED; record() runs in a background poller while current()
# serves REST reads, so both take this lock.
_LOCK = threading.Lock()
def _empty_peak(profile_id: int, all_time: dict[str, Any] | None = None) -> dict[str, Any]:
    """Build a fresh in-memory peak record for one rTorrent profile.

    A single structure holds both the current-session peaks (always reset to
    zero here) and the all-time peaks, which are carried over from *all_time*
    (typically a row previously stored in SQLite) when one is supplied.
    """
    source = all_time if all_time else {}
    peak: dict[str, Any] = {
        "profile_id": int(profile_id),
        "session_started_at": _SESSION_STARTED_AT,
        "session_down_peak": 0,
        "session_up_peak": 0,
        "session_down_peak_at": None,
        "session_up_peak_at": None,
    }
    for key in ("all_time_down_peak", "all_time_up_peak"):
        peak[key] = int(source.get(key) or 0)
    for key in ("all_time_down_peak_at", "all_time_up_peak_at"):
        peak[key] = source.get(key)
    return peak
def load_cache() -> None:
    """Populate the in-memory cache from SQLite, at most once per process.

    All-time records are read when the application starts; session records
    always begin at zero (see _empty_peak). Safe to call repeatedly — the
    _LOADED flag makes every call after the first a no-op.
    """
    global _LOADED
    with _LOCK:
        if _LOADED:
            return
        with connect() as conn:
            stored = conn.execute("SELECT * FROM transfer_speed_peaks").fetchall()
        for stored_row in stored:
            pid = int(stored_row.get("profile_id") or 0)
            if not pid:
                continue
            _CACHE[pid] = _empty_peak(pid, stored_row)
        _LOADED = True
def _ensure_profile(profile_id: int) -> dict[str, Any]:
    """Return the cached peak record for *profile_id*, creating it lazily.

    Lazy loading covers profiles added after start-up, which load_cache()
    never saw; their all-time record is fetched from SQLite on first use.
    Both call sites in this module hold _LOCK while calling this.
    """
    pid = int(profile_id)
    cached = _CACHE.get(pid)
    if cached:
        return cached
    with connect() as conn:
        stored = conn.execute(
            "SELECT * FROM transfer_speed_peaks WHERE profile_id=?", (pid,)
        ).fetchone()
    fresh = _empty_peak(pid, stored)
    _CACHE[pid] = fresh
    return fresh
def _persist(item: dict[str, Any]) -> None:
    """Upsert one profile's full peak record into SQLite.

    Only called when a new session or all-time record has been observed, so
    the database is written exclusively on actual record changes. The upsert
    keeps created_at from the first insert and refreshes updated_at on every
    conflict update.
    """
    now = utcnow()
    with connect() as conn:
        conn.execute(
            """
            INSERT INTO transfer_speed_peaks(
                profile_id, session_started_at, session_down_peak, session_up_peak,
                session_down_peak_at, session_up_peak_at, all_time_down_peak,
                all_time_up_peak, all_time_down_peak_at, all_time_up_peak_at,
                created_at, updated_at
            ) VALUES(?,?,?,?,?,?,?,?,?,?,?,?)
            ON CONFLICT(profile_id) DO UPDATE SET
                session_started_at=excluded.session_started_at,
                session_down_peak=excluded.session_down_peak,
                session_up_peak=excluded.session_up_peak,
                session_down_peak_at=excluded.session_down_peak_at,
                session_up_peak_at=excluded.session_up_peak_at,
                all_time_down_peak=excluded.all_time_down_peak,
                all_time_up_peak=excluded.all_time_up_peak,
                all_time_down_peak_at=excluded.all_time_down_peak_at,
                all_time_up_peak_at=excluded.all_time_up_peak_at,
                updated_at=excluded.updated_at
            """,
            (
                int(item["profile_id"]),
                item["session_started_at"],
                int(item["session_down_peak"]),
                int(item["session_up_peak"]),
                item.get("session_down_peak_at"),
                item.get("session_up_peak_at"),
                int(item["all_time_down_peak"]),
                int(item["all_time_up_peak"]),
                item.get("all_time_down_peak_at"),
                item.get("all_time_up_peak_at"),
                now,
                now,
            ),
        )
def _public(item: dict[str, Any]) -> dict[str, Any]:
    """Shape a cached peak record into the payload sent to the frontend.

    Each side carries both raw bytes/s and a human-readable label rendered
    with human_rate, matching how the existing speed values are exposed.
    """

    def bucket(prefix: str) -> dict[str, Any]:
        # One bucket per record scope ("session" / "all_time"); same key layout.
        down = int(item[prefix + "_down_peak"])
        up = int(item[prefix + "_up_peak"])
        return {
            "down": down,
            "up": up,
            "down_h": human_rate(down),
            "up_h": human_rate(up),
            "down_at": item.get(prefix + "_down_peak_at"),
            "up_at": item.get(prefix + "_up_peak_at"),
        }

    return {
        "session_started_at": item["session_started_at"],
        "session": bucket("session"),
        "all_time": bucket("all_time"),
    }
def record(profile_id: int, down_rate: int = 0, up_rate: int = 0) -> dict[str, Any]:
    """Fold one measurement into the peak records for *profile_id*.

    Invoked by the background poller. Negative or missing rates are clamped
    to zero. SQLite is written only when a session or all-time record was
    actually beaten; the returned payload always reflects the latest state.
    """
    load_cache()
    down = max(0, int(down_rate or 0))
    up = max(0, int(up_rate or 0))
    measured_at = utcnow()
    dirty = False
    with _LOCK:
        item = _ensure_profile(int(profile_id))
        # Session and all-time peaks follow the same update rule; iterate the
        # four (field-prefix, rate) pairs instead of repeating the branch.
        for prefix, rate in (
            ("session_down", down),
            ("session_up", up),
            ("all_time_down", down),
            ("all_time_up", up),
        ):
            if rate > int(item[prefix + "_peak"]):
                item[prefix + "_peak"] = rate
                item[prefix + "_peak_at"] = measured_at
                dirty = True
        result = _public(item)
        if dirty:
            _persist(item)
    return result
def current(profile_id: int) -> dict[str, Any]:
    """Return the last known peak payload without forcing a new measurement.

    Lets the REST API show existing records as-is; no rates are recorded.
    """
    load_cache()
    with _LOCK:
        item = _ensure_profile(int(profile_id))
        return _public(item)

View File

@@ -7,7 +7,7 @@ from ..config import POLL_INTERVAL
from .preferences import active_profile, get_profile
from .torrent_cache import torrent_cache
from .torrent_summary import cached_summary
from . import rtorrent, smart_queue, traffic_history, automation_rules, torrent_stats, auth
from . import rtorrent, smart_queue, traffic_history, automation_rules, torrent_stats, auth, speed_peaks
def _profile_room(profile_id: int) -> str:
@@ -59,6 +59,8 @@ def register_socketio_handlers(socketio):
status["usage_available"] = True
status["profile_id"] = pid
traffic_history.record(pid, status.get("down_rate", 0), status.get("up_rate", 0), status.get("total_down", 0), status.get("total_up", 0))
# Notatka: najwyższe DL/UL są liczone w tle razem z istniejącym pollerem i zapisywane tylko po przebiciu rekordu.
status["speed_peaks"] = speed_peaks.record(pid, status.get("down_rate", 0), status.get("up_rate", 0))
_emit_profile(socketio, "system_stats", status, pid)
heartbeat["ok"] = True
except Exception as exc: