queue_stopped #3

Merged
gru merged 33 commits from queue_stopped into master 2026-05-08 23:45:33 +02:00
23 changed files with 1669 additions and 230 deletions

View File

@@ -25,4 +25,4 @@ PYTORRENT_TRAFFIC_HISTORY_RETENTION_DAYS=90
PYTORRENT_JOBS_RETENTION_DAYS=30
PYTORRENT_SMART_QUEUE_HISTORY_RETENTION_DAYS=30
PYTORRENT_LOG_RETENTION_DAYS=30
-PYTORRENT_SMART_QUEUE_LABEL="Smart Queue Paused"
+PYTORRENT_SMART_QUEUE_LABEL="Smart Queue"

.gitignore
View File

@@ -34,6 +34,8 @@ storage/*
*.sqlite3-shm
*.sqlite3
data/*
!data/tracker_favicons
data/tracker_favicons/*.ico
logs/*
todo.txt

View File

@@ -30,7 +30,6 @@ def _wants_json_response() -> bool:
def register_error_pages(app: Flask) -> None:
-# Note: custom error pages replace the generic 404/500 pages and keep JSON responses for the API.
@app.errorhandler(404)
def not_found(error):
if _wants_json_response():
@@ -92,7 +91,6 @@ def create_app() -> Flask:
return url_for("static", filename=filename)
def frontend_asset_url(key: str) -> str:
-# Note: this helper switches templates between CDN and local files without duplicating logic.
path = asset_path(key)
return path if path.startswith("http") else static_url(path)
@@ -108,12 +106,19 @@ def create_app() -> Flask:
@app.after_request
def cache_headers(response):
-response.headers.pop('Content-Disposition', None)
+response.headers.pop("Content-Disposition", None)
-if request.endpoint == "static":
+static_file = request.path.startswith("/static/")
tracker_icon = request.path.startswith("/static/tracker_favicons/")
favicon_ico = request.path == "/favicon.ico"
if static_file and not tracker_icon:
response.headers["Cache-Control"] = "public, max-age=31536000, immutable" response.headers["Cache-Control"] = "public, max-age=31536000, immutable"
elif favicon_ico:
response.headers["Cache-Control"] = "public, max-age=86400"
else:
response.headers["Cache-Control"] = "no-store, private"
return response
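For reference, the new caching rules boil down to this decision. A minimal standalone sketch (paths in the asserts are illustrative; the real handler also pops Content-Disposition):

    def cache_control_for(path: str) -> str:
        # Long-lived immutable caching for static assets, except tracker favicons,
        # which can be refreshed; one day for the site favicon; nothing else is cached.
        if path.startswith("/static/") and not path.startswith("/static/tracker_favicons/"):
            return "public, max-age=31536000, immutable"
        if path == "/favicon.ico":
            return "public, max-age=86400"
        return "no-store, private"

    assert cache_control_for("/static/js/app.js").startswith("public, max-age=31536000")
    assert cache_control_for("/static/tracker_favicons/t.pte.nu.ico") == "no-store, private"
    assert cache_control_for("/favicon.ico") == "public, max-age=86400"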
from .routes.main import bp as main_bp
@@ -122,6 +127,8 @@ def create_app() -> Flask:
app.register_blueprint(api_bp)
register_error_pages(app)
init_db()
from .services.speed_peaks import load_cache
load_cache()
from .services.auth import install_guards
install_guards(app)

View File

@@ -3,9 +3,11 @@ from __future__ import annotations
import argparse
import getpass
import sys
import json
from .db import connect, init_db, utcnow
from .services.auth import password_hash
from .services import tracker_cache
def reset_password(username: str, password: str) -> bool:
@@ -30,6 +32,24 @@ def reset_password(username: str, password: str) -> bool:
return True
def fetch_tracker_favicon(domain: str, refresh: bool = True, debug: bool = False) -> str:
"""Note: Download or refresh one tracker favicon from CLI without starting the web server."""
clean = tracker_cache.tracker_domain(domain)
if not clean:
raise ValueError("Tracker domain is required")
init_db()
path, mime = tracker_cache.favicon_path(clean, enabled=True, force=refresh)
row = tracker_cache.favicon_cache_row(clean)
if not path:
detail = (row or {}).get("error") if row else "favicon not found"
if debug and row:
raise RuntimeError(f"{detail or 'favicon not found'}; cache={json.dumps(dict(row), default=str)}")
raise RuntimeError(str(detail or "favicon not found"))
if debug and row:
return f"{path} ({mime or 'unknown'}) cache={json.dumps(dict(row), default=str)}"
return f"{path} ({mime or 'unknown'})"
def _password_from_args(args: argparse.Namespace) -> str:
"""Note: Allow the password to be passed as an argument or entered securely in interactive mode."""
if args.password is not None:
@@ -51,6 +71,12 @@ def build_parser() -> argparse.ArgumentParser:
reset.add_argument("password", nargs="?", help="New password; omit to type it interactively") reset.add_argument("password", nargs="?", help="New password; omit to type it interactively")
reset.set_defaults(func=_cmd_reset_password) reset.set_defaults(func=_cmd_reset_password)
icon = sub.add_parser("tracker-favicon", help="Download or refresh a tracker favicon cache file")
icon.add_argument("domain", help="Tracker domain, e.g. t.pte.nu")
icon.add_argument("--no-refresh", action="store_true", help="Use fresh cache when available")
icon.add_argument("--debug", action="store_true", help="Print cache diagnostics on success or failure")
icon.set_defaults(func=_cmd_tracker_favicon)
return parser
@@ -64,6 +90,12 @@ def _cmd_reset_password(args: argparse.Namespace) -> int:
return 1
def _cmd_tracker_favicon(args: argparse.Namespace) -> int:
"""Note: Run favicon discovery from CLI and print the saved file path."""
print(fetch_tracker_favicon(args.domain, refresh=not args.no_refresh, debug=bool(args.debug)))
return 0
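Outside the argparse wrapper, the new helper can also be driven directly. A small sketch; the import path and the example domain usage are assumptions, not taken from this diff:

    # Hypothetical direct use of the new CLI helper (module path assumed).
    from pytorrent.manage import fetch_tracker_favicon

    try:
        # Prints "<file path> (<mime type>)", e.g. something like
        # "data/tracker_favicons/t.pte.nu.ico (image/x-icon)".
        print(fetch_tracker_favicon("t.pte.nu", refresh=True, debug=False))
    except (ValueError, RuntimeError) as exc:
        # ValueError for an empty domain, RuntimeError when no favicon could be cached.
        print(f"favicon lookup failed: {exc}")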
def main(argv: list[str] | None = None) -> int:
"""Note: Main CLI entrypoint with error handling and without starting the web app."""
parser = build_parser()

View File

@@ -70,5 +70,5 @@ TRAFFIC_HISTORY_RETENTION_DAYS = _env_int("PYTORRENT_TRAFFIC_HISTORY_RETENTION_D
JOBS_RETENTION_DAYS = _env_int("PYTORRENT_JOBS_RETENTION_DAYS", 30, 1)
SMART_QUEUE_HISTORY_RETENTION_DAYS = _env_int("PYTORRENT_SMART_QUEUE_HISTORY_RETENTION_DAYS", 30, 1)
LOG_RETENTION_DAYS = _env_int("PYTORRENT_LOG_RETENTION_DAYS", 30, 1)
-SMART_QUEUE_LABEL = os.getenv("PYTORRENT_SMART_QUEUE_LABEL", "Smart Queue Paused")
+SMART_QUEUE_LABEL = os.getenv("PYTORRENT_SMART_QUEUE_LABEL", "Smart Queue Stopped")
SMART_QUEUE_STALLED_LABEL = os.getenv("PYTORRENT_SMART_QUEUE_STALLED_LABEL", "Stalled")
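The _env_int helper itself is not part of this diff; based on how it is called above, a sketch of the assumed behavior (fall back on bad values, clamp to a minimum) would look like:

    import os

    def _env_int(name: str, default: int, minimum: int) -> int:
        # Assumed signature: read an integer from the environment, fall back to the
        # default on missing or unparsable values, and never go below the minimum.
        try:
            value = int(os.getenv(name, "") or default)
        except (TypeError, ValueError):
            value = default
        return max(minimum, value)

    # e.g. PYTORRENT_LOG_RETENTION_DAYS=0 would be clamped back up to 1 day.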

View File

@@ -39,6 +39,9 @@ CREATE TABLE IF NOT EXISTS user_preferences (
peers_refresh_seconds INTEGER DEFAULT 0,
port_check_enabled INTEGER DEFAULT 0,
footer_items_json TEXT,
title_speed_enabled INTEGER DEFAULT 0,
tracker_favicons_enabled INTEGER DEFAULT 0,
interface_scale INTEGER DEFAULT 100,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY(user_id) REFERENCES users(id)
@@ -140,6 +143,8 @@ CREATE TABLE IF NOT EXISTS smart_queue_settings (
min_speed_bytes INTEGER DEFAULT 1024,
min_seeds INTEGER DEFAULT 1,
min_peers INTEGER DEFAULT 0,
ignore_seed_peer INTEGER DEFAULT 0,
ignore_speed INTEGER DEFAULT 0,
manage_stopped INTEGER DEFAULT 0,
updated_at TEXT NOT NULL,
PRIMARY KEY(user_id, profile_id)
@@ -150,6 +155,7 @@ CREATE TABLE IF NOT EXISTS smart_queue_stalled (
torrent_hash TEXT NOT NULL,
first_stalled_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
timer_key TEXT DEFAULT '',
PRIMARY KEY(profile_id, torrent_hash)
);
@@ -197,6 +203,22 @@ CREATE TABLE IF NOT EXISTS traffic_history (
CREATE INDEX IF NOT EXISTS idx_traffic_history_profile_created ON traffic_history(profile_id, created_at);
CREATE TABLE IF NOT EXISTS transfer_speed_peaks (
profile_id INTEGER PRIMARY KEY,
session_started_at TEXT NOT NULL,
session_down_peak INTEGER DEFAULT 0,
session_up_peak INTEGER DEFAULT 0,
session_down_peak_at TEXT,
session_up_peak_at TEXT,
all_time_down_peak INTEGER DEFAULT 0,
all_time_up_peak INTEGER DEFAULT 0,
all_time_down_peak_at TEXT,
all_time_up_peak_at TEXT,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
FOREIGN KEY(profile_id) REFERENCES rtorrent_profiles(id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS automation_rules (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_id INTEGER NOT NULL,
@@ -257,6 +279,26 @@ CREATE TABLE IF NOT EXISTS torrent_stats_cache (
updated_at TEXT NOT NULL,
updated_epoch REAL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS tracker_summary_cache (
profile_id INTEGER NOT NULL,
torrent_hash TEXT NOT NULL,
trackers_json TEXT NOT NULL,
updated_at TEXT NOT NULL,
updated_epoch REAL DEFAULT 0,
PRIMARY KEY(profile_id, torrent_hash)
);
CREATE INDEX IF NOT EXISTS idx_tracker_summary_cache_profile ON tracker_summary_cache(profile_id, updated_epoch);
CREATE TABLE IF NOT EXISTS tracker_favicon_cache (
domain TEXT PRIMARY KEY,
source_url TEXT,
file_path TEXT,
mime_type TEXT,
updated_at TEXT NOT NULL,
updated_epoch REAL DEFAULT 0,
error TEXT
);
""" """
MIGRATIONS = [
@@ -269,6 +311,9 @@ MIGRATIONS = [
"ALTER TABLE user_preferences ADD COLUMN bootstrap_theme TEXT DEFAULT 'default'", "ALTER TABLE user_preferences ADD COLUMN bootstrap_theme TEXT DEFAULT 'default'",
"ALTER TABLE user_preferences ADD COLUMN font_family TEXT DEFAULT 'default'", "ALTER TABLE user_preferences ADD COLUMN font_family TEXT DEFAULT 'default'",
"ALTER TABLE user_preferences ADD COLUMN footer_items_json TEXT", "ALTER TABLE user_preferences ADD COLUMN footer_items_json TEXT",
"ALTER TABLE user_preferences ADD COLUMN title_speed_enabled INTEGER DEFAULT 0",
"ALTER TABLE user_preferences ADD COLUMN tracker_favicons_enabled INTEGER DEFAULT 0",
"ALTER TABLE user_preferences ADD COLUMN interface_scale INTEGER DEFAULT 100",
"ALTER TABLE rtorrent_profiles ADD COLUMN max_parallel_jobs INTEGER DEFAULT 5", "ALTER TABLE rtorrent_profiles ADD COLUMN max_parallel_jobs INTEGER DEFAULT 5",
"ALTER TABLE rtorrent_profiles ADD COLUMN is_remote INTEGER DEFAULT 0", "ALTER TABLE rtorrent_profiles ADD COLUMN is_remote INTEGER DEFAULT 0",
"ALTER TABLE jobs ADD COLUMN attempts INTEGER DEFAULT 0", "ALTER TABLE jobs ADD COLUMN attempts INTEGER DEFAULT 0",
@@ -282,6 +327,12 @@ MIGRATIONS = [
"ALTER TABLE torrent_stats_cache ADD COLUMN updated_epoch REAL DEFAULT 0", "ALTER TABLE torrent_stats_cache ADD COLUMN updated_epoch REAL DEFAULT 0",
"ALTER TABLE smart_queue_settings ADD COLUMN manage_stopped INTEGER DEFAULT 0", "ALTER TABLE smart_queue_settings ADD COLUMN manage_stopped INTEGER DEFAULT 0",
"ALTER TABLE smart_queue_settings ADD COLUMN min_peers INTEGER DEFAULT 0", "ALTER TABLE smart_queue_settings ADD COLUMN min_peers INTEGER DEFAULT 0",
"ALTER TABLE smart_queue_settings ADD COLUMN ignore_seed_peer INTEGER DEFAULT 0",
"ALTER TABLE smart_queue_settings ADD COLUMN ignore_speed INTEGER DEFAULT 0",
"ALTER TABLE smart_queue_stalled ADD COLUMN timer_key TEXT DEFAULT ''",
"CREATE TABLE IF NOT EXISTS tracker_summary_cache (profile_id INTEGER NOT NULL, torrent_hash TEXT NOT NULL, trackers_json TEXT NOT NULL, updated_at TEXT NOT NULL, updated_epoch REAL DEFAULT 0, PRIMARY KEY(profile_id, torrent_hash))",
"CREATE INDEX IF NOT EXISTS idx_tracker_summary_cache_profile ON tracker_summary_cache(profile_id, updated_epoch)",
"CREATE TABLE IF NOT EXISTS tracker_favicon_cache (domain TEXT PRIMARY KEY, source_url TEXT, file_path TEXT, mime_type TEXT, updated_at TEXT NOT NULL, updated_epoch REAL DEFAULT 0, error TEXT)",
]
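The runner that applies MIGRATIONS is not shown in this diff. One common way to keep a flat list of ALTER/CREATE statements idempotent is to re-run everything and ignore duplicate-column errors; a sketch under that assumption:

    import sqlite3

    def apply_migrations(conn: sqlite3.Connection, migrations: list[str]) -> None:
        # Assumed behavior: CREATE TABLE/INDEX IF NOT EXISTS entries are naturally
        # idempotent, and repeated ALTER TABLE ... ADD COLUMN calls are tolerated
        # by swallowing the "duplicate column name" error.
        for statement in migrations:
            try:
                conn.execute(statement)
            except sqlite3.OperationalError as exc:
                if "duplicate column name" not in str(exc).lower():
                    raise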

View File

@@ -13,11 +13,11 @@ import socket
import json
import psutil
import xml.etree.ElementTree as ET
-from flask import Blueprint, jsonify, request, abort
+from flask import Blueprint, jsonify, request, abort, send_file, redirect
from ..config import DB_PATH, JOBS_RETENTION_DAYS, SMART_QUEUE_HISTORY_RETENTION_DAYS, WORKERS
from ..db import connect, utcnow
from ..services.auth import current_user_id as default_user_id, current_user, list_users, save_user, delete_user, login_user, logout_user, enabled as auth_enabled, require_profile_write
-from ..services import preferences, rtorrent, torrent_stats
+from ..services import preferences, rtorrent, torrent_stats, speed_peaks, tracker_cache
from ..services.torrent_cache import torrent_cache
from ..services.torrent_summary import cached_summary
from ..services.workers import enqueue, list_jobs, cancel_job, retry_job, clear_jobs, emergency_clear_jobs
@@ -484,6 +484,58 @@ def torrents():
})
@bp.get("/trackers/summary")
def trackers_summary():
profile = preferences.active_profile()
if not profile:
return ok({"summary": {"hashes": {}, "trackers": [], "errors": [], "scanned": 0, "pending": 0}, "error": "No profile"})
try:
# Note: Tracker summary returns cached data immediately; optional warmup scans rTorrent in the background for very large libraries.
scan_limit = min(250, max(0, int(request.args.get("scan_limit") or 0)))
bg_limit = min(250, max(1, int(request.args.get("bg_limit") or 80)))
warm = str(request.args.get("warm") or "").lower() in {"1", "true", "yes"}
hashes = [t.get("hash") for t in torrent_cache.snapshot(profile["id"]) if t.get("hash")]
prefs = preferences.get_preferences()
include_favicons = bool(prefs and prefs.get("tracker_favicons_enabled"))
loader = lambda h: rtorrent.torrent_trackers(profile, h)
summary = tracker_cache.summary(profile, hashes, loader, scan_limit=scan_limit, include_favicons=include_favicons)
if warm and int(summary.get("pending") or 0) > 0:
summary["warming"] = tracker_cache.warm_summary_cache(profile, hashes, loader, batch_size=bg_limit)
return ok({"summary": summary})
except Exception as exc:
return ok({"summary": {"hashes": {}, "trackers": [], "errors": [{"error": str(exc)}], "scanned": 0, "pending": 0}, "error": str(exc)})
@bp.get("/trackers/favicon/<path:domain>")
@bp.get("/tracker-favicon/<path:domain>")
def tracker_favicon(domain: str):
prefs = preferences.get_preferences()
force = str(request.args.get("refresh") or "").lower() in {"1", "true", "yes", "force"}
# Note: Manual refresh must work from CLI even when tracker favicons are disabled in Preferences.
enabled = force or bool(prefs and prefs.get("tracker_favicons_enabled"))
static_url = tracker_cache.favicon_public_url(domain, enabled=enabled, create=True, force=force)
if static_url:
# Note: The API only discovers/cache-warms the icon; the browser receives the file from /static/tracker_favicons/.
return redirect(static_url, code=302)
cached = tracker_cache.favicon_cache_row(domain)
return jsonify({
"ok": False,
"error": "favicon not found",
"domain": tracker_cache.tracker_domain(domain),
"enabled": bool(enabled),
"cached_error": (cached or {}).get("error") if cached else None,
}), 404
@bp.get("/trackers/favicon")
def tracker_favicon_query():
# Note: Query-string alias makes cache warming easier from shell scripts where path routing/proxies may differ.
domain = str(request.args.get("domain") or "").strip()
if not domain:
return jsonify({"ok": False, "error": "domain is required"}), 400
return tracker_favicon(domain)
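Illustrative client-side use of the two favicon routes, assuming the blueprint is mounted under /api as the OpenAPI paths suggest; the base URL and the use of the requests package are assumptions:

    import requests  # assumed to be available; any HTTP client works

    BASE = "http://localhost:5000"  # illustrative

    # Path form; follows the 302 redirect to /static/tracker_favicons/<file> when a favicon is cached.
    resp = requests.get(f"{BASE}/api/trackers/favicon/t.pte.nu", allow_redirects=True)
    print(resp.status_code, resp.headers.get("Content-Type"))

    # Query-string alias with forced refresh, handy from shell scripts where path routing differs.
    resp = requests.get(f"{BASE}/api/trackers/favicon", params={"domain": "t.pte.nu", "refresh": "1"})
    print(resp.status_code)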
@bp.get("/torrent-stats") @bp.get("/torrent-stats")
def torrent_stats_get(): def torrent_stats_get():
profile = preferences.active_profile() profile = preferences.active_profile()
@@ -629,6 +681,8 @@ def system_status():
status["ram"] = psutil.virtual_memory().percent status["ram"] = psutil.virtual_memory().percent
status["usage_source"] = "local" status["usage_source"] = "local"
status["usage_available"] = True status["usage_available"] = True
# Note: The REST status endpoint returns the latest peak records without waiting for the next Socket.IO message.
status["speed_peaks"] = speed_peaks.record(profile["id"], status.get("down_rate", 0), status.get("up_rate", 0))
return ok({"status": status}) return ok({"status": status})
except Exception as exc: except Exception as exc:
return jsonify({"ok": False, "error": str(exc)}) return jsonify({"ok": False, "error": str(exc)})
@@ -670,6 +724,11 @@ def app_status():
status["scgi"] = rtorrent.scgi_diagnostics(profile) status["scgi"] = rtorrent.scgi_diagnostics(profile)
except Exception as exc: except Exception as exc:
status["scgi"] = {"ok": False, "error": str(exc), "url": profile.get("scgi_url")} status["scgi"] = {"ok": False, "error": str(exc), "url": profile.get("scgi_url")}
try:
# Note: The diagnostics panel shows the same DL/UL peak records as the footer.
status["speed_peaks"] = speed_peaks.current(profile["id"])
except Exception as exc:
status["speed_peaks"] = {"error": str(exc)}
try:
prefs = preferences.get_preferences()
status["port_check"] = {"status": "disabled", "enabled": False} if not bool((prefs or {}).get("port_check_enabled")) else port_check_status(force=False)

View File

@@ -5,15 +5,26 @@ from ..services.preferences import get_preferences, list_profiles, active_profil
from ..services import auth
from ..services.frontend_assets import asset_path
# for favicon
from flask import current_app, send_from_directory
bp = Blueprint("main", __name__) bp = Blueprint("main", __name__)
def _asset_url(key: str) -> str:
-# Note: the API docs use the same CDN/offline switch as the rest of the application.
path = asset_path(key)
return path if path.startswith("http") else url_for("static", filename=path)
@bp.get("/favicon.ico")
def favicon_ico():
response = send_from_directory(
current_app.static_folder,
"favicon.svg",
mimetype="image/svg+xml",
)
return response
@bp.route("/login", methods=["GET", "POST"]) @bp.route("/login", methods=["GET", "POST"])
def login(): def login():
@@ -106,7 +117,7 @@ def openapi():
"/api/rss/feeds": {"post": {"summary": "Add RSS feed", "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "RSS config"}}}}, "/api/rss/feeds": {"post": {"summary": "Add RSS feed", "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "RSS config"}}}},
"/api/rss/rules": {"post": {"summary": "Add RSS rule", "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "RSS config"}}}}, "/api/rss/rules": {"post": {"summary": "Add RSS rule", "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "RSS config"}}}},
"/api/rss/check": {"post": {"summary": "Manually check RSS feeds", "responses": {"200": {"description": "Queued matches"}}}}, "/api/rss/check": {"post": {"summary": "Manually check RSS feeds", "responses": {"200": {"description": "Queued matches"}}}},
"/api/smart-queue": {"get": {"summary": "Get Smart Queue settings, exceptions and history", "parameters": [{"name": "history_limit", "in": "query", "schema": {"type": "integer", "default": 10, "minimum": 1, "maximum": 100}, "description": "Number of Smart Queue history rows to return"}], "responses": {"200": {"description": "Smart Queue config with history and history_total"}}}, "post": {"summary": "Save Smart Queue settings", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"enabled": {"type": "boolean"}, "max_active_downloads": {"type": "integer"}, "stalled_seconds": {"type": "integer"}, "min_speed_bytes": {"type": "integer"}, "min_seeds": {"type": "integer"}, "min_peers": {"type": "integer"}}}}}}, "responses": {"200": {"description": "Saved"}}}}, "/api/smart-queue": {"get": {"summary": "Get Smart Queue settings, exceptions and history", "parameters": [{"name": "history_limit", "in": "query", "schema": {"type": "integer", "default": 10, "minimum": 1, "maximum": 100}, "description": "Number of Smart Queue history rows to return"}], "responses": {"200": {"description": "Smart Queue config with history and history_total"}}}, "post": {"summary": "Save Smart Queue settings", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"enabled": {"type": "boolean"}, "max_active_downloads": {"type": "integer"}, "stalled_seconds": {"type": "integer"}, "min_speed_bytes": {"type": "integer"}, "min_seeds": {"type": "integer"}, "min_peers": {"type": "integer"}, "ignore_seed_peer": {"type": "boolean"}, "ignore_speed": {"type": "boolean"}}}}}}, "responses": {"200": {"description": "Saved"}}}},
"/api/smart-queue/check": {"post": {"summary": "Run Smart Queue immediately", "responses": {"200": {"description": "Smart Queue action result"}}}}, "/api/smart-queue/check": {"post": {"summary": "Run Smart Queue immediately", "responses": {"200": {"description": "Smart Queue action result"}}}},
"/api/smart-queue/exclusion": {"post": {"summary": "Add or remove a torrent Smart Queue exception", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"hash": {"type": "string"}, "excluded": {"type": "boolean"}, "reason": {"type": "string"}}}}}}, "responses": {"200": {"description": "Exception list"}}}}, "/api/smart-queue/exclusion": {"post": {"summary": "Add or remove a torrent Smart Queue exception", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"hash": {"type": "string"}, "excluded": {"type": "boolean"}, "reason": {"type": "string"}}}}}}, "responses": {"200": {"description": "Exception list"}}}},
"/api/traffic/history": {"get": {"summary": "Transfer history for charts", "parameters": [{"name": "range", "in": "query", "schema": {"type": "string", "enum": ["15m", "1h", "3h", "6h", "24h", "7d", "30d", "90d"]}}], "responses": {"200": {"description": "Aggregated traffic history"}}}} "/api/traffic/history": {"get": {"summary": "Transfer history for charts", "parameters": [{"name": "range", "in": "query", "schema": {"type": "string", "enum": ["15m", "1h", "3h", "6h", "24h", "7d", "30d", "90d"]}}], "responses": {"200": {"description": "Aggregated traffic history"}}}}

View File

@@ -4,7 +4,6 @@ from pathlib import Path
from ..config import BASE_DIR, USE_OFFLINE_LIBS
-# Note: a single manifest keeps the CDN URLs and the local paths consistent for offline mode.
LIBS_STATIC_DIR = "libs"
LIBS_DIR = BASE_DIR / "pytorrent" / "static" / LIBS_STATIC_DIR
BOOTSTRAP_VERSION = "5.3.3"
@@ -84,7 +83,6 @@ def required_offline_paths() -> list[Path]:
def missing_offline_paths() -> list[Path]:
missing = [path for path in required_offline_paths() if not path.is_file() or path.stat().st_size <= 0]
-# Note: assets referenced from CSS, e.g. icon fonts and flag files, are checked as well.
required_dirs = [
LIBS_DIR / f"fontawesome/{FONTAWESOME_VERSION}/webfonts",
LIBS_DIR / f"flag-icons/{FLAG_ICONS_VERSION}/flags/4x3",
@@ -97,7 +95,6 @@ def missing_offline_paths() -> list[Path]:
def validate_offline_assets() -> None:
-# Note: the application refuses to start when offline mode is active but the files are not installed.
if not USE_OFFLINE_LIBS:
return
missing = missing_offline_paths()

View File

@@ -28,7 +28,6 @@ FONT_FAMILIES = {
}
def bootstrap_css_url(theme: str | None) -> str:
-# Note: this function is kept and still returns the current theme URL, but the offline configuration chooses the source.
from .frontend_assets import bootstrap_css_path
return bootstrap_css_path(theme)
@@ -167,6 +166,9 @@ def save_preferences(data: dict, user_id: int | None = None):
peers_refresh_seconds = data.get("peers_refresh_seconds")
port_check_enabled = data.get("port_check_enabled")
footer_items_json = data.get("footer_items_json")
title_speed_enabled = data.get("title_speed_enabled")
tracker_favicons_enabled = data.get("tracker_favicons_enabled")
interface_scale = data.get("interface_scale")
with connect() as conn:
now = utcnow()
if allowed_theme:
@@ -183,6 +185,15 @@ def save_preferences(data: dict, user_id: int | None = None):
conn.execute("UPDATE user_preferences SET peers_refresh_seconds=?, updated_at=? WHERE user_id=?", (sec, now, user_id)) conn.execute("UPDATE user_preferences SET peers_refresh_seconds=?, updated_at=? WHERE user_id=?", (sec, now, user_id))
if port_check_enabled is not None: if port_check_enabled is not None:
conn.execute("UPDATE user_preferences SET port_check_enabled=?, updated_at=? WHERE user_id=?", (1 if port_check_enabled else 0, now, user_id)) conn.execute("UPDATE user_preferences SET port_check_enabled=?, updated_at=? WHERE user_id=?", (1 if port_check_enabled else 0, now, user_id))
if title_speed_enabled is not None:
conn.execute("UPDATE user_preferences SET title_speed_enabled=?, updated_at=? WHERE user_id=?", (1 if title_speed_enabled else 0, now, user_id))
if tracker_favicons_enabled is not None:
conn.execute("UPDATE user_preferences SET tracker_favicons_enabled=?, updated_at=? WHERE user_id=?", (1 if tracker_favicons_enabled else 0, now, user_id))
if interface_scale is not None:
scale = int(interface_scale or 100)
if scale < 80: scale = 80
if scale > 140: scale = 140
conn.execute("UPDATE user_preferences SET interface_scale=?, updated_at=? WHERE user_id=?", (scale, now, user_id))
if footer_items_json is not None:
# Note: Store only JSON objects so footer visibility can be extended without schema churn.
value = footer_items_json if isinstance(footer_items_json, str) else json.dumps(footer_items_json)
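The interface_scale handling above clamps the value to the 80-140 percent range; an equivalent, slightly more compact formulation for illustration (the error handling for unparsable input is an addition, not the project's code):

    def clamp_interface_scale(value) -> int:
        # Same 80-140 clamp expressed with max/min; falls back to 100 on bad input.
        try:
            scale = int(value or 100)
        except (TypeError, ValueError):
            scale = 100
        return max(80, min(140, scale))

    assert clamp_interface_scale(None) == 100
    assert clamp_interface_scale(200) == 140
    assert clamp_interface_scale("90") == 90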

View File

@@ -372,6 +372,37 @@ def browse_path(profile: dict, path: str | None = None) -> dict:
POST_CHECK_DOWNLOAD_LABEL = "To download after check" POST_CHECK_DOWNLOAD_LABEL = "To download after check"
_POST_CHECK_WATCH_TTL_SECONDS = 48 * 60 * 60
_POST_CHECK_WATCH_MIN_SECONDS = 2.0
_POST_CHECK_WATCH: dict[int, dict[str, float]] = {}
def _mark_post_check_watch(profile_id: int, torrent_hash: str) -> None:
if not torrent_hash:
return
_POST_CHECK_WATCH.setdefault(int(profile_id), {})[str(torrent_hash)] = time.time()
def _clear_post_check_watch(profile_id: int, torrent_hash: str) -> None:
profile_watch = _POST_CHECK_WATCH.get(int(profile_id))
if not profile_watch:
return
profile_watch.pop(str(torrent_hash), None)
if not profile_watch:
_POST_CHECK_WATCH.pop(int(profile_id), None)
def _is_post_check_watched(profile_id: int, torrent_hash: str) -> bool:
profile_watch = _POST_CHECK_WATCH.get(int(profile_id)) or {}
started_at = profile_watch.get(str(torrent_hash))
if not started_at:
return False
age = time.time() - started_at
if age > _POST_CHECK_WATCH_TTL_SECONDS:
_clear_post_check_watch(profile_id, torrent_hash)
return False
# Note: A short grace period prevents labeling a recheck that was queued but has not visibly entered hashing yet.
return age >= _POST_CHECK_WATCH_MIN_SECONDS
def _label_names(value: str) -> list[str]:
@@ -387,65 +418,94 @@ def _label_value(labels: list[str]) -> str:
return ", ".join([label for label in labels if str(label or "").strip()]) return ", ".join([label for label in labels if str(label or "").strip()])
def _without_post_check_download_label(value: str | None) -> str:
return _label_value([label for label in _label_names(str(value or "")) if label != POST_CHECK_DOWNLOAD_LABEL])
def clear_post_check_download_label(c: ScgiRtorrentClient, torrent_hash: str, current_label: str | None = None) -> bool:
label_source = current_label
if label_source is None:
try:
label_source = str(c.call("d.custom1", str(torrent_hash or "")) or "")
except Exception:
label_source = ""
labels = _label_names(str(label_source or ""))
if POST_CHECK_DOWNLOAD_LABEL not in labels:
return False
# Note: The temporary post-check label is removed only after the torrent leaves the stopped waiting queue.
c.call("d.custom1.set", str(torrent_hash or ""), _label_value([label for label in labels if label != POST_CHECK_DOWNLOAD_LABEL]))
return True
def _message_indicates_active_check(message: str) -> bool:
msg = str(message or "").lower()
if not msg:
return False
finished_markers = ("complete", "completed", "finished", "success", "succeeded", "failed", "done")
if any(marker in msg for marker in finished_markers):
return False
active_markers = ("checking", "hashing", "hash check queued", "hash check scheduled", "check hash queued", "recheck queued", "rechecking")
return any(marker in msg for marker in active_markers)
def _row_progress_complete(row: dict) -> bool:
size = int(row.get("size") or 0)
completed = int(row.get("completed_bytes") or 0)
return bool(row.get("complete")) or (size > 0 and completed >= size) or float(row.get("progress") or 0) >= 100.0
-def _remove_post_check_label_if_finished(c: ScgiRtorrentClient, row: dict) -> bool:
+def _cleanup_post_check_label_if_ready(c: ScgiRtorrentClient, row: dict) -> bool:
labels = _label_names(str(row.get("label") or ""))
if POST_CHECK_DOWNLOAD_LABEL not in labels:
return False
status = str(row.get("status") or "").lower()
-if not (_row_progress_complete(row) or status == "seeding"):
+started_after_wait = bool(int(row.get("state") or 0)) and status != "checking"
+if not (_row_progress_complete(row) or status == "seeding" or started_after_wait):
return False
-labels = [label for label in labels if label != POST_CHECK_DOWNLOAD_LABEL]
-value = _label_value(labels)
-# Note: Clean the temporary label after reaching 100% or entering seeding, even when the state no longer comes directly from recheck.
-c.call("d.custom1.set", str(row.get("hash") or ""), value)
-row["label"] = value
+# Note: Keep the post-check label while the torrent is stopped; remove it once it is started for download/seeding.
+clear_post_check_download_label(c, str(row.get("hash") or ""), str(row.get("label") or ""))
+row["label"] = _without_post_check_download_label(str(row.get("label") or ""))
return True
def apply_post_check_policy(profile: dict, rows: list[dict], previous_rows: dict[str, dict] | None = None) -> list[dict]:
-"""Start complete torrents after check; pause and label incomplete ones."""
+"""Start complete torrents after check; stop and label incomplete ones for Smart Queue."""
previous_rows = previous_rows or {}
+profile_id = int(profile.get("id") or 0)
c = client_for(profile)
changes: list[dict] = []
for row in rows:
h = str(row.get("hash") or "")
prev = previous_rows.get(h) or {}
try:
-if h and _remove_post_check_label_if_finished(c, row):
-changes.append({"hash": h, "action": "remove_post_check_label", "complete": True})
+if h and _cleanup_post_check_label_if_ready(c, row):
+changes.append({"hash": h, "action": "remove_post_check_label"})
except Exception as exc:
changes.append({"hash": h, "action": "remove_post_check_label_failed", "error": str(exc)})
was_checking = str(prev.get("status") or "") == "Checking" or int(prev.get("hashing") or 0) > 0
+watched_recheck = _is_post_check_watched(profile_id, h)
is_checking = str(row.get("status") or "") == "Checking" or int(row.get("hashing") or 0) > 0
-if not h or not was_checking or is_checking:
+if not h or not (was_checking or watched_recheck) or is_checking:
continue
complete = _row_progress_complete(row)
try:
if complete:
-# Note: After a completed check, a complete torrent is started automatically so it can seed immediately.
-c.call("d.start", h)
-labels = [label for label in _label_names(str(row.get("label") or "")) if label != POST_CHECK_DOWNLOAD_LABEL]
-if _label_value(labels) != str(row.get("label") or ""):
-c.call("d.custom1.set", h, _label_value(labels))
-row["label"] = _label_value(labels)
-row.update({"state": 1, "active": 1, "paused": False, "status": "Seeding"})
-changes.append({"hash": h, "action": "start", "complete": True})
+# Note: A fully checked torrent is started with the same helper as the manual Start action so it seeds immediately.
+start_result = start_or_resume_hash(c, h)
+clear_post_check_download_label(c, h, str(row.get("label") or ""))
+row.update({"state": 1, "active": 1, "paused": False, "status": "Seeding", "label": _without_post_check_download_label(str(row.get("label") or ""))})
+changes.append({"hash": h, "action": "start_seed_after_check", "complete": True, "result": start_result})
else:
-# Note: After check, an incomplete torrent is paused and labeled to show that it needs more downloading.
-c.call("d.start", h)
-c.call("d.pause", h)
labels = _label_names(str(row.get("label") or ""))
if POST_CHECK_DOWNLOAD_LABEL not in labels:
labels.append(POST_CHECK_DOWNLOAD_LABEL)
-c.call("d.custom1.set", h, _label_value(labels))
-row.update({"state": 1, "active": 0, "paused": True, "status": "Paused", "label": _label_value(labels)})
-changes.append({"hash": h, "action": "pause_and_label", "complete": False, "label": POST_CHECK_DOWNLOAD_LABEL})
+label_value = _label_value(labels)
+# Note: Incomplete torrents are left stopped after check so Smart Queue can start them later within the global limit.
+c.call("d.stop", h)
+c.call("d.custom1.set", h, label_value)
+row.update({"state": 0, "active": 0, "paused": False, "status": "Stopped", "label": label_value})
+changes.append({"hash": h, "action": "stop_and_label_after_check", "complete": False, "label": POST_CHECK_DOWNLOAD_LABEL})
+_clear_post_check_watch(profile_id, h)
except Exception as exc:
changes.append({"hash": h, "action": "post_check_policy_failed", "error": str(exc)})
return changes
@@ -489,7 +549,8 @@ def normalize_row(row: list) -> dict:
is_active = int(row[21] or 0) if len(row) > 21 else int(row[2] or 0)
state = int(row[2] or 0)
complete = int(row[3] or 0)
-is_checking = bool(hashing) or ("hash" in msg_l and ("check" in msg_l or "checking" in msg_l)) or "recheck" in msg_l
+# Note: d.hashing is authoritative; stale "hash check complete" messages must not keep the UI in Checking forever.
+is_checking = bool(hashing) or _message_indicates_active_check(msg_l)
is_paused = bool(state) and not bool(is_active) and not is_checking
status = "Checking" if is_checking else "Paused" if is_paused else "Seeding" if complete and state else "Downloading" if state else "Stopped"
return {
@@ -861,6 +922,49 @@ def _call_first(c: ScgiRtorrentClient, candidates: list[tuple[str, tuple]]) -> d
raise RuntimeError("; ".join(errors)) raise RuntimeError("; ".join(errors))
def _tracker_domain(url: str) -> str:
raw = str(url or '').strip()
if not raw:
return ''
parsed = urlparse(raw if '://' in raw else f'http://{raw}')
host = (parsed.hostname or '').lower().strip('.')
if host.startswith('www.'):
host = host[4:]
return host
def tracker_summary(profile: dict, torrent_hashes: list[str] | None = None, limit: int = 1000) -> dict:
"""Return tracker domains grouped by torrent for the sidebar filter."""
# Note: Tracker summary is read-only and isolated from the normal torrent snapshot, so slow tracker RPC calls cannot break the main list.
hashes = [str(h or '').strip() for h in (torrent_hashes or []) if str(h or '').strip()]
if not hashes:
hashes = [t.get('hash') for t in list_torrents(profile) if t.get('hash')]
hashes = hashes[:max(1, int(limit or 1000))]
by_hash: dict[str, list[dict]] = {}
counts: dict[str, dict] = {}
errors = []
for h in hashes:
try:
items = []
seen = set()
for tr in torrent_trackers(profile, h):
url = str(tr.get('url') or '')
domain = _tracker_domain(url)
if not domain or domain in seen:
continue
seen.add(domain)
item = {'domain': domain, 'url': url}
items.append(item)
row = counts.setdefault(domain, {'domain': domain, 'url': url, 'count': 0})
row['count'] += 1
by_hash[h] = items
except Exception as exc:
errors.append({'hash': h, 'error': str(exc)})
by_hash[h] = []
trackers = sorted(counts.values(), key=lambda x: (-int(x.get('count') or 0), str(x.get('domain') or '')))
return {'hashes': by_hash, 'trackers': trackers, 'errors': errors, 'scanned': len(hashes)}
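For reference, the domain normalization used by _tracker_domain collapses tracker URLs to one sidebar entry per host. A standalone copy of the same logic for illustration (the example URLs are made up):

    from urllib.parse import urlparse

    def tracker_domain(url: str) -> str:
        # Mirrors _tracker_domain above: tolerate missing schemes, lowercase the host,
        # drop the port and a leading "www." prefix.
        raw = str(url or "").strip()
        if not raw:
            return ""
        parsed = urlparse(raw if "://" in raw else f"http://{raw}")
        host = (parsed.hostname or "").lower().strip(".")
        return host[4:] if host.startswith("www.") else host

    # urlparse.hostname already lowercases and strips the port, so these collapse to one entry each:
    assert tracker_domain("https://T.PTE.NU:443/announce?passkey=x") == "t.pte.nu"
    assert tracker_domain("udp://tracker.example.org:6969/announce") == "tracker.example.org"
    assert tracker_domain("www.example.org/announce") == "example.org"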
def _safe_tracker_call(c: ScgiRtorrentClient, method: str, target: str, default=None):
try:
return c.call(method, target)
@@ -880,9 +984,39 @@ def _tracker_int(value, default=None):
return default
def _tracker_rows(c: ScgiRtorrentClient, torrent_hash: str) -> list[list]:
fields = ("t.url=", "t.is_enabled=", "t.scrape_complete=", "t.scrape_incomplete=", "t.scrape_downloaded=")
errors: list[str] = []
for args in ((torrent_hash, "", *fields), ("", torrent_hash, *fields)):
try:
rows = c.call("t.multicall", *args)
return [list(r) for r in (rows or [])]
except Exception as exc:
errors.append(f"t.multicall{args[:2]}: {exc}")
# Note: Fallback keeps the sidebar tracker filter usable on rTorrent builds without t.multicall scrape fields.
total = _tracker_int(_safe_tracker_call(c, "d.tracker_size", torrent_hash, 0), 0) or 0
rows: list[list] = []
for index in range(max(0, total)):
target = _tracker_target(torrent_hash, index)
url = _safe_tracker_call(c, "t.url", target, "")
if not url:
for args in ((torrent_hash, index), ("", torrent_hash, index)):
try:
url = c.call("t.url", *args)
break
except Exception:
continue
if url:
enabled = _safe_tracker_call(c, "t.is_enabled", target, 1)
rows.append([url, enabled, None, None, None])
if rows:
return rows
raise RuntimeError("Cannot read trackers: " + "; ".join(errors))
def torrent_trackers(profile: dict, torrent_hash: str) -> list[dict]:
c = client_for(profile)
-rows = c.t.multicall(torrent_hash, "", "t.url=", "t.is_enabled=", "t.scrape_complete=", "t.scrape_incomplete=", "t.scrape_downloaded=")
+rows = _tracker_rows(c, torrent_hash)
trackers = []
for idx, r in enumerate(rows):
target = _tracker_target(torrent_hash, idx)
@@ -1216,6 +1350,27 @@ def pause_hash(c: ScgiRtorrentClient, torrent_hash: str) -> dict:
return result
def stop_hash(c: ScgiRtorrentClient, torrent_hash: str) -> dict:
"""Stop an active rTorrent item without using pause semantics."""
h = str(torrent_hash or '')
if not h:
return {'hash': h, 'ok': False, 'error': 'missing hash'}
before = _download_runtime_state(c, h)
result = {'hash': h, 'before': before, 'commands': []}
if before.get('stopped'):
result.update({'ok': True, 'skipped': 'already_stopped', 'after': before})
return result
try:
# Note: Smart Queue now enforces the queue with d.stop only; user-paused torrents stay untouched.
c.call('d.stop', h)
result['commands'].append('d.stop')
result['after'] = _download_runtime_state(c, h)
result['ok'] = True
except Exception as exc:
result.update({'ok': False, 'error': str(exc), 'after': _download_runtime_state(c, h)})
return result
def resume_paused_hash(c: ScgiRtorrentClient, torrent_hash: str) -> dict:
"""Resume only a paused rTorrent item; never convert it through stop/start."""
h = str(torrent_hash or '')
@@ -1377,6 +1532,9 @@ def action(profile: dict, torrent_hashes: list[str], name: str, payload: dict |
if remove_data:
results.append(_remove_torrent_data(c, h))
c.call(method, h)
if name == "recheck":
# Note: Recheck is tracked so even very fast checks still receive the after-check start/stop policy.
_mark_post_check_watch(int(profile.get("id") or 0), h)
return {"ok": True, "count": len(torrent_hashes), "remove_data": remove_data, "results": results} return {"ok": True, "count": len(torrent_hashes), "remove_data": remove_data, "results": results}
def add_magnet(profile: dict, uri: str, start: bool = True, directory: str = "", label: str = "") -> dict: def add_magnet(profile: dict, uri: str, start: bool = True, directory: str = "", label: str = "") -> dict:

View File

@@ -38,7 +38,9 @@ def _default_settings(user_id: int, profile_id: int) -> dict[str, Any]:
'min_speed_bytes': 1024,
'min_seeds': 1,
'min_peers': 0,
-'manage_stopped': 0,
+'ignore_seed_peer': 0,
+'ignore_speed': 0,
+'manage_stopped': 1,
'updated_at': utcnow(),
}
@@ -64,14 +66,18 @@ def save_settings(profile_id: int, data: dict[str, Any], user_id: int | None = N
'min_seeds': _int_setting(data, current, 'min_seeds', 0, 0),
# Note: Min peers is optional; when set, stalled detection requires low speed, low seeds and low peers.
'min_peers': _int_setting(data, current, 'min_peers', 0, 0),
-# Note: This switch protects fully stopped torrents from automatic starts; by default Smart Queue manages only paused items.
-'manage_stopped': 1 if data.get('manage_stopped', current.get('manage_stopped')) else 0,
+# Note: Ignore seed/peer removes source counts from stalled detection, useful when sources appear rarely.
+'ignore_seed_peer': 1 if data.get('ignore_seed_peer', current.get('ignore_seed_peer')) else 0,
+# Note: Ignore speed removes low transfer rate from stalled detection; with both ignores enabled only stalled_seconds matters.
+'ignore_speed': 1 if data.get('ignore_speed', current.get('ignore_speed')) else 0,
+# Note: Compatibility field retained; enabled Smart Queue always manages stopped torrents and never manages user-paused torrents.
+'manage_stopped': 1,
}
now = utcnow()
with connect() as conn:
conn.execute(
-'''INSERT INTO smart_queue_settings(user_id,profile_id,enabled,max_active_downloads,stalled_seconds,min_speed_bytes,min_seeds,min_peers,manage_stopped,updated_at)
-VALUES(?,?,?,?,?,?,?,?,?,?)
+'''INSERT INTO smart_queue_settings(user_id,profile_id,enabled,max_active_downloads,stalled_seconds,min_speed_bytes,min_seeds,min_peers,ignore_seed_peer,ignore_speed,manage_stopped,updated_at)
+VALUES(?,?,?,?,?,?,?,?,?,?,?,?)
ON CONFLICT(user_id, profile_id) DO UPDATE SET
enabled=excluded.enabled,
max_active_downloads=excluded.max_active_downloads,
@@ -79,9 +85,11 @@ def save_settings(profile_id: int, data: dict[str, Any], user_id: int | None = N
min_speed_bytes=excluded.min_speed_bytes,
min_seeds=excluded.min_seeds,
min_peers=excluded.min_peers,
ignore_seed_peer=excluded.ignore_seed_peer,
ignore_speed=excluded.ignore_speed,
manage_stopped=excluded.manage_stopped,
updated_at=excluded.updated_at''',
-(user_id, profile_id, settings['enabled'], settings['max_active_downloads'], settings['stalled_seconds'], settings['min_speed_bytes'], settings['min_seeds'], settings['min_peers'], settings['manage_stopped'], now),
+(user_id, profile_id, settings['enabled'], settings['max_active_downloads'], settings['stalled_seconds'], settings['min_speed_bytes'], settings['min_seeds'], settings['min_peers'], settings['ignore_seed_peer'], settings['ignore_speed'], settings['manage_stopped'], now),
)
return get_settings(profile_id, user_id)
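The boolean handling in save_settings merges the request with the stored row: a key missing from the request keeps the stored value, while a present key (even False) overrides it. A small illustration with invented values:

    current = {"ignore_seed_peer": 1, "ignore_speed": 0}
    data = {"ignore_speed": True}  # the user only toggled one switch

    merged = {
        "ignore_seed_peer": 1 if data.get("ignore_seed_peer", current.get("ignore_seed_peer")) else 0,
        "ignore_speed": 1 if data.get("ignore_speed", current.get("ignore_speed")) else 0,
    }
    assert merged == {"ignore_seed_peer": 1, "ignore_speed": 1}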
@@ -241,7 +249,7 @@ def _restore_auto_label(client: Any, profile_id: int, torrent_hash: str, current
except Exception:
return False
try:
-# Note: Starting a torrent removes only Smart Queue's technical marker, so labels added while paused stay untouched.
+# Note: Starting a torrent removes only Smart Queue's technical marker, so labels added while stopped stay untouched.
if _has_smart_queue_label(live_label):
client.call('d.custom1.set', torrent_hash, _without_smart_queue_label(live_label))
conn.execute('DELETE FROM smart_queue_auto_labels WHERE profile_id=? AND torrent_hash=?', (profile_id, torrent_hash))
@@ -291,19 +299,20 @@ def _ensure_rtorrent_download_cap(client: Any, max_active: int) -> dict[str, Any
def _start_download(client: Any, torrent: dict[str, Any]) -> dict[str, Any]:
-"""Resume paused torrents through rTorrent's pause model."""
+"""Start only stopped Smart Queue candidates; paused torrents are a user decision."""
h = str(torrent.get('hash') or '')
if not h:
return {'hash': h, 'ok': False, 'error': 'missing hash'}
-if bool(torrent.get('paused')) or str(torrent.get('status') or '').lower() == 'paused' or int(torrent.get('state') or 0):
-# Note: Smart Queue candidates paused with d.pause must be resumed with d.resume, without d.start/d.stop.
-return rtorrent.resume_paused_hash(client, h)
-# Note: Only optional manage_stopped uses the start path for fully stopped torrents.
-# Note: Smart Queue uses the same helper as the manual Start action, so start behavior stays identical.
+if _is_user_paused(torrent):
+# Note: Smart Queue never unpauses user-paused torrents; it manages only stopped items.
+return {'hash': h, 'ok': False, 'skipped': 'user_paused'}
+# Note: This is the same helper used by the manual Start action, so queue starts follow the UI path.
return rtorrent.start_or_resume_hash(client, h)
def _verify_started_downloads(client: Any, hashes: list[str], attempts: int = 10, delay: float = 0.5) -> tuple[list[str], list[dict[str, Any]]]:
-"""Verify starts after rTorrent has time to process resume/start commands."""
+"""Verify starts after rTorrent has time to process manual-equivalent start commands."""
pending = [h for h in hashes if h]
started: list[str] = []
no_effect: list[dict[str, Any]] = []
@@ -342,11 +351,17 @@ def _read_live_start_state(client: Any, torrent_hash: str) -> dict[str, Any]:
result[key] = int(value or 0) if key in {'state', 'active', 'open', 'priority'} else str(value or '')
except Exception as exc:
result[f'{key}_error'] = str(exc)
-# Note: Do not treat d.is_open or state=1 as resumed; Paused can also have those values.
-# Smart Queue counts a start only after d.is_active=1, meaning the pause was actually removed.
-result['started'] = bool(int(result.get('active') or 0))
+# Note: Manual Start in rTorrent is successful when d.state becomes 1.
+# d.is_active can stay 0 for queued/idle downloads, so it must not be used as the only success check.
+result['started'] = bool(int(result.get('state') or 0) or int(result.get('active') or 0))
return result
def _is_user_paused(torrent: dict[str, Any]) -> bool:
"""Return True for torrents paused by the user; Smart Queue must not touch them."""
status = str(torrent.get('status') or '').lower()
return bool(torrent.get('paused')) or status == 'paused'
def _set_smart_queue_label(client: Any, torrent_hash: str, current_label: str = '', attempts: int = 3) -> bool:
labels = _label_names(current_label)
if SMART_QUEUE_LABEL in labels:
@@ -364,7 +379,7 @@ def _set_smart_queue_label(client: Any, torrent_hash: str, current_label: str =
return False
-def _mark_auto_paused(client: Any, profile_id: int, torrent: dict[str, Any]) -> bool:
+def _mark_auto_stopped(client: Any, profile_id: int, torrent: dict[str, Any]) -> bool:
torrent_hash = str(torrent.get('hash') or '')
if not torrent_hash:
return False
@@ -374,20 +389,31 @@ def _mark_auto_paused(client: Any, profile_id: int, torrent: dict[str, Any]) ->
return _set_smart_queue_label(client, torrent_hash, previous)
def _is_started_download_slot(torrent: dict[str, Any] | None) -> bool:
"""Return True for incomplete torrents already started in rTorrent, including manual starts."""
if not torrent or int(torrent.get('complete') or 0):
return False
status = str(torrent.get('status') or '').lower()
if status == 'checking':
return False
# Note: Manual Start changes d.state first; d.is_active may stay 0 while rTorrent is queued or idle.
return bool(int(torrent.get('state') or 0) or int(torrent.get('active') or 0))
def _is_smart_queue_hold(torrent: dict[str, Any] | None, manage_stopped: bool = True) -> bool:
if not torrent or int(torrent.get('complete') or 0):
return False
if _is_started_download_slot(torrent):
# Note: A manual start can leave the Smart Queue label behind; started items are active slots, not holds.
return False
if _has_stalled_label(str(torrent.get('label') or '')):
return False
if _is_user_paused(torrent):
# Note: Paused torrents are always treated as user-controlled and are not Smart Queue holds.
return False
if _has_smart_queue_label(str(torrent.get('label') or '')):
return True
-# Note: Paused in rTorrent usually has state=1 and active=0, so state=0 must not be required.
-# This lets Smart Queue treat paused torrents as pending and fill the queue target later.
-if bool(torrent.get('paused')):
-return True
-# Note: Fully stopped items are managed only when Use stopped torrents is enabled.
-if not manage_stopped:
-return False
+# Note: Smart Queue manages stopped torrents by default; the old manage_stopped flag is ignored for compatibility.
return not int(torrent.get('state') or 0)
@@ -432,31 +458,50 @@ def _cleanup_auto_labels(client: Any, profile_id: int, torrents: list[dict[str,
def _is_running_download_slot(t: dict[str, Any]) -> bool:
"""Return True for incomplete torrents that already occupy a Smart Queue slot."""
-# Note: The Smart Queue limit means the target number of actually active slots.
-# Paused can have state=1/open=1, so a slot is counted only after d.is_active=1.
-if int(t.get('complete') or 0):
-return False
-if _has_smart_queue_label(str(t.get('label') or '')) or _has_stalled_label(str(t.get('label') or '')):
-return False
-status = str(t.get('status') or '').lower()
-if status == 'checking' or status == 'paused' or bool(t.get('paused')):
-return False
-return bool(int(t.get('active') or 0))
+# Note: Do not exclude Smart Queue/Stalled labels here. Manual Start can leave old labels,
+# and those torrents still must count toward the global Smart Queue limit.
+return _is_started_download_slot(t)
+def _is_stalled_download(t: dict[str, Any], min_speed: int, min_seeds: int, min_peers: int, ignore_seed_peer: bool, ignore_speed: bool) -> bool:
+"""Return True when a started torrent should begin or continue the stalled timer."""
+# Note: Each ignore switch only removes its own criterion; the stalled timer is still respected after criteria match.
+speed_ok = True if ignore_speed else int(t.get('down_rate') or 0) <= max(0, int(min_speed or 0))
+source_ok = True if ignore_seed_peer else int(t.get('seeds') or 0) <= max(0, int(min_seeds or 0)) and (min_peers <= 0 or int(t.get('peers') or 0) <= min_peers)
+return speed_ok and source_ok
def _stalled_timer_key(min_speed: int, min_seeds: int, min_peers: int, stalled_seconds: int, ignore_seed_peer: bool, ignore_speed: bool) -> str:
"""Return a stable key for the stalled rules that started the current timer."""
# Note: Changing ignore switches or thresholds restarts existing stalled timers instead of reusing old rows.
return f"v2|speed={int(min_speed or 0)}|seeds={int(min_seeds or 0)}|peers={int(min_peers or 0)}|seconds={int(stalled_seconds or 0)}|ignore_sources={int(bool(ignore_seed_peer))}|ignore_speed={int(bool(ignore_speed))}"
def _is_low_activity_download(t: dict[str, Any], min_speed: int, min_seeds: int, min_peers: int, ignore_seed_peer: bool = False, ignore_speed: bool = False) -> bool:
"""Return True when a started torrent is weak and should be stopped first."""
# Note: Stop priority uses only criteria that are not ignored, so disabled criteria cannot stop torrents earlier.
low_speed = False if ignore_speed else int(t.get('down_rate') or 0) <= max(0, int(min_speed or 0))
low_seeds = False if ignore_seed_peer else int(t.get('seeds') or 0) <= max(0, int(min_seeds or 0))
low_peers = False if ignore_seed_peer or min_peers <= 0 else int(t.get('peers') or 0) <= max(0, int(min_peers or 0))
return low_speed or low_seeds or low_peers
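# Illustrative sketch, not part of the module: a hypothetical weak torrent dict run through
# the two checks above to show that each ignore switch removes exactly one criterion.
def _example_ignore_switches() -> None:
    weak = {'down_rate': 0, 'seeds': 0, 'peers': 1, 'complete': 0}
    # All criteria enforced: the torrent is both stalled and stop-eligible.
    assert _is_stalled_download(weak, 0, 0, 0, ignore_seed_peer=False, ignore_speed=False)
    assert _is_low_activity_download(weak, 0, 0, 0)
    # Ignoring speed keeps the seed/peer criterion, so the torrent still counts as stalled.
    assert _is_stalled_download(weak, 0, 0, 0, ignore_seed_peer=False, ignore_speed=True)
    # Ignoring both switches leaves no stop reason at all.
    assert not _is_low_activity_download(weak, 0, 0, 0, ignore_seed_peer=True, ignore_speed=True)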
def _is_waiting_download_candidate(t: dict[str, Any], manage_stopped: bool) -> bool: def _is_waiting_download_candidate(t: dict[str, Any], manage_stopped: bool) -> bool:
"""Return True for paused/held torrents Smart Queue may resume later.""" """Return True for stopped torrents Smart Queue may start later."""
if int(t.get('complete') or 0): if int(t.get('complete') or 0):
return False return False
if str(t.get('status') or '').lower() == 'checking':
# Note: Torrents still being checked must finish post-check handling before Smart Queue may start them.
return False
if _has_stalled_label(str(t.get('label') or '')): if _has_stalled_label(str(t.get('label') or '')):
return False return False
if _is_user_paused(t):
# Note: User-paused torrents are never candidates, even when they have no Smart Queue label.
return False
if _has_smart_queue_label(str(t.get('label') or '')): if _has_smart_queue_label(str(t.get('label') or '')):
return True return True
# Note: Paused items are the primary source for filling the queue, regardless of manage_stopped. # Note: Enabled Smart Queue manages all stopped torrents; no separate stopped-torrent switch is needed.
if bool(t.get('paused')) or str(t.get('status') or '').lower() == 'paused': return not int(t.get('state') or 0)
return True
# Note: Stopped items are added only when the user enabled Use stopped torrents.
return bool(manage_stopped) and not int(t.get('state') or 0)
def check(profile: dict | None = None, user_id: int | None = None, force: bool = False) -> dict[str, Any]: def check(profile: dict | None = None, user_id: int | None = None, force: bool = False) -> dict[str, Any]:
@@ -471,58 +516,74 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
try: try:
# Note: When Smart Queue is disabled, only technical labels are cleaned up, without starting or pausing torrents. # Note: When Smart Queue is disabled, only technical labels are cleaned up, without starting or pausing torrents.
torrents = rtorrent.list_torrents(profile) torrents = rtorrent.list_torrents(profile)
restored = _cleanup_auto_labels(rtorrent.client_for(profile), profile_id, torrents, set(), bool(settings.get('manage_stopped'))) restored = _cleanup_auto_labels(rtorrent.client_for(profile), profile_id, torrents, set(), True)
except Exception: except Exception:
restored = [] restored = []
add_history(profile_id, 'skipped_disabled', [], [], 0, {'enabled': False, 'labels_restored': restored}, user_id) add_history(profile_id, 'skipped_disabled', [], [], 0, {'enabled': False, 'labels_restored': restored}, user_id)
return {'ok': True, 'enabled': False, 'paused': [], 'resumed': [], 'labels_restored': restored, 'message': 'Smart Queue disabled'} return {'ok': True, 'enabled': False, 'paused': [], 'resumed': [], 'stopped': [], 'started': [], 'labels_restored': restored, 'message': 'Smart Queue disabled'}
torrents = rtorrent.list_torrents(profile) torrents = rtorrent.list_torrents(profile)
# Note: Torrents marked as Stalled are treated as queue-blocked even when there are no other pending downloads. # Note: Stalled labels block automatic starting only; a manually started Stalled item still counts as a running slot.
stalled_label_hashes = {str(t.get('hash') or '') for t in torrents if _has_stalled_label(str(t.get('label') or '')) and t.get('hash')} stalled_label_hashes = {str(t.get('hash') or '') for t in torrents if _has_stalled_label(str(t.get('label') or '')) and t.get('hash')}
excluded = _excluded_hashes(profile_id, user_id) | stalled_label_hashes user_excluded = _excluded_hashes(profile_id, user_id)
manage_stopped = bool(settings.get('manage_stopped')) manage_stopped = True
def is_managed_hold(t: dict[str, Any]) -> bool:
return _has_smart_queue_label(str(t.get('label') or ''))
# Note: Count Smart Queue slots by d.is_active because Paused can have state=1/open=1 and must not occupy the limit. # Note: Count every started incomplete torrent, including items started manually and items with old Smart Queue labels.
downloading = [ downloading = [
t for t in torrents t for t in torrents
if _is_running_download_slot(t) if _is_running_download_slot(t)
and not is_managed_hold(t) and str(t.get('hash') or '') not in user_excluded
and t.get('hash') not in excluded
] ]
# Note: Candidates also include regular Paused items without a label. Otherwise the queue sees only one or two items # Note: Waiting candidates are stopped queue holds only; Stalled labels are not auto-started again.
# and cannot fill the configured target of 100.
stopped = [ stopped = [
t for t in torrents t for t in torrents
if t.get('hash') not in excluded if str(t.get('hash') or '') not in user_excluded
and str(t.get('hash') or '') not in stalled_label_hashes
and _is_waiting_download_candidate(t, manage_stopped) and _is_waiting_download_candidate(t, manage_stopped)
and not _is_running_download_slot(t) and not _is_running_download_slot(t)
] ]
manual_labeled_running = [
str(t.get('hash') or '') for t in downloading
if str(t.get('hash') or '') and _has_smart_queue_label(str(t.get('label') or ''))
]
min_speed = int(settings.get('min_speed_bytes') or 0) min_speed = int(settings.get('min_speed_bytes') or 0)
min_seeds = int(settings.get('min_seeds') or 0) min_seeds = int(settings.get('min_seeds') or 0)
min_peers = int(settings.get('min_peers') or 0) min_peers = int(settings.get('min_peers') or 0)
ignore_seed_peer = bool(int(settings.get('ignore_seed_peer') or 0))
ignore_speed = bool(int(settings.get('ignore_speed') or 0))
stalled_seconds = int(settings.get('stalled_seconds') or 300) stalled_seconds = int(settings.get('stalled_seconds') or 300)
timer_key = _stalled_timer_key(min_speed, min_seeds, min_peers, stalled_seconds, ignore_seed_peer, ignore_speed)
now = utcnow() now = utcnow()
now_ts = datetime.now(timezone.utc).timestamp() now_ts = datetime.now(timezone.utc).timestamp()
stalled: list[dict[str, Any]] = [] stalled: list[dict[str, Any]] = []
stop_eligible: list[dict[str, Any]] = []
# Note: Toast diagnostics count active torrents whose ignored criteria would otherwise match during this check.
ignored_seed_peer_count = 0
ignored_speed_count = 0
with connect() as conn: with connect() as conn:
for t in downloading: for t in downloading:
# Note: Stalled detection requires low speed plus low seeds and, when configured, low peers. # Note: Stalled detection respects seed/peer and speed ignore switches before starting the timer.
is_stalled = int(t.get('down_rate') or 0) <= min_speed and int(t.get('seeds') or 0) <= min_seeds and (min_peers <= 0 or int(t.get('peers') or 0) <= min_peers) if ignore_seed_peer and (int(t.get('seeds') or 0) <= max(0, int(min_seeds or 0)) or (min_peers > 0 and int(t.get('peers') or 0) <= max(0, int(min_peers or 0)))):
ignored_seed_peer_count += 1
if ignore_speed and int(t.get('down_rate') or 0) <= max(0, int(min_speed or 0)):
ignored_speed_count += 1
is_stalled = _is_stalled_download(t, min_speed, min_seeds, min_peers, ignore_seed_peer, ignore_speed)
# Note: Hard-limit enforcement respects the same ignore switches before choosing weak items.
if _is_low_activity_download(t, min_speed, min_seeds, min_peers, ignore_seed_peer, ignore_speed):
stop_eligible.append(t)
h = t.get('hash') h = t.get('hash')
if not h: if not h:
continue continue
if is_stalled: if is_stalled:
row = conn.execute('SELECT first_stalled_at FROM smart_queue_stalled WHERE profile_id=? AND torrent_hash=?', (profile_id, h)).fetchone() row = conn.execute('SELECT first_stalled_at, timer_key FROM smart_queue_stalled WHERE profile_id=? AND torrent_hash=?', (profile_id, h)).fetchone()
if row: if row and str(row.get('timer_key') or '') == timer_key:
conn.execute('UPDATE smart_queue_stalled SET updated_at=? WHERE profile_id=? AND torrent_hash=?', (now, profile_id, h)) conn.execute('UPDATE smart_queue_stalled SET updated_at=? WHERE profile_id=? AND torrent_hash=?', (now, profile_id, h))
first = row['first_stalled_at'] first = row['first_stalled_at']
else: else:
# Note: A changed stalled rule starts a fresh timer, so old rows cannot instantly mark torrents as Stalled.
first = now first = now
conn.execute('INSERT OR REPLACE INTO smart_queue_stalled(profile_id,torrent_hash,first_stalled_at,updated_at) VALUES(?,?,?,?)', (profile_id, h, first, now)) conn.execute('INSERT OR REPLACE INTO smart_queue_stalled(profile_id,torrent_hash,first_stalled_at,updated_at,timer_key) VALUES(?,?,?,?,?)', (profile_id, h, first, now, timer_key))
if now_ts - _ts(first) >= stalled_seconds: if now_ts - _ts(first) >= stalled_seconds:
stalled.append(t) stalled.append(t)
else: else:
@@ -537,95 +598,106 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
max_active = max(1, int(settings.get('max_active_downloads') or 5)) max_active = max(1, int(settings.get('max_active_downloads') or 5))
stalled_hashes = {str(t.get('hash') or '') for t in stalled} stalled_hashes = {str(t.get('hash') or '') for t in stalled}
# Enforce the hard active-download cap first. The previous logic only limited # Enforce the hard active-download cap across the whole started queue, including manual starts.
# newly resumed torrents, so already-active downloads could stay above the limit. # Note: Weak/no-source torrents are stopped first, but the cap is still enforced when the overflow is larger.
pause_rank = sorted( over_limit = max(0, len(downloading) - max_active)
stop_eligible_hashes = {str(t.get('hash') or '') for t in stop_eligible}
stop_rank = sorted(
downloading, downloading,
key=lambda t: ( key=lambda t: (
0 if str(t.get('hash') or '') in stalled_hashes else 1, 0 if str(t.get('hash') or '') in stalled_hashes else 1,
0 if str(t.get('hash') or '') in stop_eligible_hashes else 1,
int(t.get('down_rate') or 0), int(t.get('down_rate') or 0),
int(t.get('seeds') or 0), int(t.get('seeds') or 0),
int(t.get('peers') or 0), int(t.get('peers') or 0),
), ),
) )
to_pause: list[dict[str, Any]] = pause_rank[:max(0, len(downloading) - max_active)] to_stop: list[dict[str, Any]] = stop_rank[:over_limit]
pause_hashes = {str(t.get('hash') or '') for t in to_pause} stop_hashes = {str(t.get('hash') or '') for t in to_stop}
# Note: Confirmed stalled downloads are removed from the active queue immediately, then new candidates can fill those slots. # Note: Confirmed stalled downloads are removed from the active queue immediately, then new candidates can fill those slots.
for t in stalled: for t in stalled:
h = str(t.get('hash') or '') h = str(t.get('hash') or '')
if h and h not in pause_hashes: if h and h not in stop_hashes:
to_pause.append(t) to_stop.append(t)
pause_hashes.add(h) stop_hashes.add(h)
active_after_pause = max(0, len(downloading) - len(to_pause))
available_slots = max(0, max_active - active_after_pause)
to_resume = candidates[:available_slots]
# Note: Items outside the current start batch are explicitly marked as pending Smart Queue items.
to_label_waiting = candidates[available_slots:]
c = rtorrent.client_for(profile) c = rtorrent.client_for(profile)
rtorrent_cap = _ensure_rtorrent_download_cap(c, max_active) rtorrent_cap = _ensure_rtorrent_download_cap(c, max_active)
paused: list[str] = [] stopped_by_queue: list[str] = []
resumed: list[str] = [] started_by_queue: list[str] = []
label_failed: list[str] = [] label_failed: list[str] = []
stalled_labeled: list[str] = [] stalled_labeled: list[str] = []
stop_failed: list[dict[str, str]] = []
start_failed: list[dict[str, str]] = [] start_failed: list[dict[str, str]] = []
start_no_effect: list[dict[str, Any]] = [] start_no_effect: list[dict[str, Any]] = []
resume_requested: list[str] = [] start_requested: list[str] = []
start_results: list[dict[str, Any]] = [] start_results: list[dict[str, Any]] = []
for t in to_pause: for t in to_stop:
try:
h = str(t.get('hash') or '') h = str(t.get('hash') or '')
pause_result = rtorrent.pause_hash(c, h) try:
if not pause_result.get('ok'): # Note: Smart Queue stops with the same low-level d.stop command used by the manual Stop action.
raise RuntimeError(pause_result.get('error') or 'pause failed') # This avoids extra pre-check RPCs and keeps large queues from failing after only a few items.
c.call('d.stop', h)
if h in stalled_hashes: if h in stalled_hashes:
if _ensure_stalled_label(c, h, _read_label(c, h, str(t.get('label') or ''))): if _ensure_stalled_label(c, h, _read_label(c, h, str(t.get('label') or ''))):
stalled_labeled.append(h) stalled_labeled.append(h)
else: else:
label_failed.append(h) label_failed.append(h)
elif not _mark_auto_paused(c, profile_id, t): elif not _mark_auto_stopped(c, profile_id, t):
label_failed.append(h) label_failed.append(h)
paused.append(h) stopped_by_queue.append(h)
except Exception: except Exception as exc:
pass # Note: Stop failures are stored in history instead of being swallowed, so queue drift is visible.
stop_failed.append({'hash': h, 'error': str(exc)})
active_after_stop = max(0, len(downloading) - len(stopped_by_queue))
# Note: Starts are planned only after confirmed stops, so failed stops cannot push the queue above the cap.
available_slots = max(0, max_active - active_after_stop)
to_start = candidates[:available_slots]
# Note: Items outside the current start batch are explicitly marked as pending Smart Queue items.
to_label_waiting = candidates[available_slots:]
for t in to_label_waiting: for t in to_label_waiting:
h = str(t.get('hash') or '') h = str(t.get('hash') or '')
if not h or h in pause_hashes: if not h or h in stop_hashes:
continue continue
try: try:
if not _mark_auto_paused(c, profile_id, t): if not _mark_auto_stopped(c, profile_id, t):
label_failed.append(h) label_failed.append(h)
except Exception: except Exception:
label_failed.append(h) label_failed.append(h)
# Note: Start the whole candidate batch in one round. Remove the label after an accepted RPC, # Note: Start the whole candidate batch in one round. Remove the label after an accepted RPC,
# because rTorrent may keep some items in its own queue with active=0 despite a valid d.start/d.resume. # because rTorrent may keep some items in its own queue with active=0 despite a valid d.start/d.resume.
for t in to_resume: for t in to_start:
h = str(t.get('hash') or '') h = str(t.get('hash') or '')
if not h: if not h:
continue continue
try: try:
result = _start_download(c, t) result = _start_download(c, t)
start_results.append(result) start_results.append(result)
resume_requested.append(h) start_requested.append(h)
except Exception as exc: except Exception as exc:
start_failed.append({'hash': h, 'error': str(exc)}) start_failed.append({'hash': h, 'error': str(exc)})
active_verified, start_no_effect = _verify_started_downloads(c, resume_requested) active_verified, start_no_effect = _verify_started_downloads(c, start_requested)
for h in active_verified: for h in active_verified:
_restore_auto_label(c, profile_id, h, None) _restore_auto_label(c, profile_id, h, None)
# Note: History shows only torrents actually unpaused, not just the number of sent commands. try:
resumed = list(active_verified) # Note: Once Smart Queue starts a post-check torrent, its temporary download-after-check label is no longer needed.
rtorrent.clear_post_check_download_label(c, h, None)
except Exception:
label_failed.append(h)
# Note: History shows only torrents actually started, not just the number of sent commands.
started_by_queue = list(active_verified)
keep_labels = ( keep_labels = (
set(paused) set(stopped_by_queue)
| {str(t.get('hash') or '') for t in to_label_waiting} | {str(t.get('hash') or '') for t in to_label_waiting}
| {str(t.get('hash') or '') for t in stopped if _has_smart_queue_label(str(t.get('label') or '')) and str(t.get('hash') or '') not in set(resumed)} | {str(t.get('hash') or '') for t in stopped if _has_smart_queue_label(str(t.get('label') or '')) and str(t.get('hash') or '') not in set(started_by_queue)}
) )
restored = _cleanup_auto_labels(c, profile_id, torrents, keep_labels, manage_stopped) restored = _cleanup_auto_labels(c, profile_id, torrents, keep_labels, manage_stopped)
details = {'excluded': len(excluded), 'excluded_stalled': len(stalled_label_hashes), 'enabled': bool(settings.get('enabled')), 'auto_label': SMART_QUEUE_LABEL, 'stalled_label': SMART_QUEUE_STALLED_LABEL, 'stalled_labeled': stalled_labeled, 'labels_restored': restored, 'labels_failed': label_failed, 'start_failed': start_failed, 'start_no_effect': start_no_effect, 'start_results': start_results, 'resume_requested': resume_requested, 'active_verified': active_verified, 'waiting_labeled': len(to_label_waiting), 'manage_stopped': manage_stopped, 'max_active_downloads': max_active, 'active_before': len(downloading), 'active_after_expected': active_after_pause + len(resumed), 'paused_planned': len(to_pause), 'resumed_planned': len(to_resume), 'rtorrent_cap': rtorrent_cap} details = {'excluded': len(user_excluded), 'excluded_stalled': len(stalled_label_hashes), 'manual_labeled_running': len(manual_labeled_running), 'manual_labeled_running_hashes': manual_labeled_running[:100], 'enabled': bool(settings.get('enabled')), 'auto_label': SMART_QUEUE_LABEL, 'stalled_label': SMART_QUEUE_STALLED_LABEL, 'stalled_labeled': stalled_labeled, 'labels_restored': restored, 'labels_failed': label_failed, 'stop_failed': stop_failed, 'start_failed': start_failed, 'start_no_effect': start_no_effect, 'start_results': start_results, 'start_requested': start_requested, 'active_verified': active_verified, 'waiting_labeled': len(to_label_waiting), 'manage_stopped': True, 'max_active_downloads': max_active, 'active_before': len(downloading), 'active_after_stop': active_after_stop, 'active_after_expected': active_after_stop + len(started_by_queue), 'over_limit': over_limit, 'stop_eligible': len(stop_eligible), 'ignore_seed_peer': ignore_seed_peer, 'ignore_speed': ignore_speed, 'ignored_seed_peer_count': ignored_seed_peer_count if ignore_seed_peer else 0, 'ignored_speed_count': ignored_speed_count if ignore_speed else 0, 'stalled_seconds': stalled_seconds, 'stalled_timer_key': timer_key, 'healthy_active_protected': 0, 'stopped_planned': len(to_stop), 'started_planned': len(to_start), 'paused_planned': len(to_stop), 'resumed_planned': len(to_start), 'rtorrent_cap': rtorrent_cap}
add_history(profile_id, 'force_check' if force else 'auto_check', paused, resumed, len(torrents), details, user_id) add_history(profile_id, 'force_check' if force else 'auto_check', stopped_by_queue, started_by_queue, len(torrents), {**details, 'stopped': stopped_by_queue, 'started': started_by_queue}, user_id)
return {'ok': True, 'enabled': bool(settings.get('enabled')), 'paused': paused, 'resumed': resumed, 'resume_requested': resume_requested, 'waiting_labeled': len(to_label_waiting), 'stalled_labeled': stalled_labeled, 'excluded_stalled': len(stalled_label_hashes), 'labels_restored': restored, 'labels_failed': label_failed, 'start_failed': start_failed, 'start_no_effect': start_no_effect, 'active_verified': active_verified, 'rtorrent_cap': rtorrent_cap, 'checked': len(torrents), 'excluded': len(excluded), 'settings': settings} return {'ok': True, 'enabled': bool(settings.get('enabled')), 'paused': stopped_by_queue, 'resumed': started_by_queue, 'stopped': stopped_by_queue, 'started': started_by_queue, 'start_requested': start_requested, 'waiting_labeled': len(to_label_waiting), 'stalled_labeled': stalled_labeled, 'excluded_stalled': len(stalled_label_hashes), 'manual_labeled_running': len(manual_labeled_running), 'labels_restored': restored, 'labels_failed': label_failed, 'stop_failed': stop_failed, 'start_failed': start_failed, 'start_no_effect': start_no_effect, 'active_verified': active_verified, 'active_before': len(downloading), 'active_after_stop': active_after_stop, 'over_limit': over_limit, 'stop_eligible': len(stop_eligible), 'ignore_seed_peer': ignore_seed_peer, 'ignore_speed': ignore_speed, 'ignored_seed_peer_count': ignored_seed_peer_count if ignore_seed_peer else 0, 'ignored_speed_count': ignored_speed_count if ignore_speed else 0, 'stalled_seconds': stalled_seconds, 'stalled_timer_key': timer_key, 'healthy_active_protected': 0, 'rtorrent_cap': rtorrent_cap, 'checked': len(torrents), 'excluded': len(user_excluded), 'settings': settings}
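# Illustrative sketch, assuming a reachable rTorrent profile: a forced Smart Queue pass.
# 'stopped'/'started' are the new result keys; 'paused'/'resumed' mirror them for older callers.
def _example_force_check(user_id: int | None = None) -> dict[str, Any]:
    result = check(profile=None, user_id=user_id, force=True)
    return {'stopped': result['stopped'], 'started': result['started'], 'over_limit': result['over_limit']}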

View File

@@ -0,0 +1,159 @@
from __future__ import annotations
import threading
from typing import Any
from ..db import connect, utcnow
from .rtorrent import human_rate
_SESSION_STARTED_AT = utcnow()
_CACHE: dict[int, dict[str, Any]] = {}
_LOADED = False
_LOCK = threading.Lock()
def _empty_peak(profile_id: int, all_time: dict[str, Any] | None = None) -> dict[str, Any]:
# Note: A single in-memory structure holds both the current-session and all-time peaks for an rTorrent profile.
all_time = all_time or {}
return {
"profile_id": int(profile_id),
"session_started_at": _SESSION_STARTED_AT,
"session_down_peak": 0,
"session_up_peak": 0,
"session_down_peak_at": None,
"session_up_peak_at": None,
"all_time_down_peak": int(all_time.get("all_time_down_peak") or 0),
"all_time_up_peak": int(all_time.get("all_time_up_peak") or 0),
"all_time_down_peak_at": all_time.get("all_time_down_peak_at"),
"all_time_up_peak_at": all_time.get("all_time_up_peak_at"),
}
def load_cache() -> None:
# Note: All-time records are loaded at application startup, while the session record starts from zero.
global _LOADED
with _LOCK:
if _LOADED:
return
with connect() as conn:
rows = conn.execute("SELECT * FROM transfer_speed_peaks").fetchall()
for row in rows:
profile_id = int(row.get("profile_id") or 0)
if profile_id:
_CACHE[profile_id] = _empty_peak(profile_id, row)
_LOADED = True
def _ensure_profile(profile_id: int) -> dict[str, Any]:
# Note: Lazy loading protects profiles added after startup from ending up with empty records.
profile_id = int(profile_id)
item = _CACHE.get(profile_id)
if item:
return item
with connect() as conn:
row = conn.execute("SELECT * FROM transfer_speed_peaks WHERE profile_id=?", (profile_id,)).fetchone()
item = _empty_peak(profile_id, row)
_CACHE[profile_id] = item
return item
def _persist(item: dict[str, Any]) -> None:
# Note: SQLite is written only when a new session or all-time record appears.
now = utcnow()
with connect() as conn:
conn.execute(
"""
INSERT INTO transfer_speed_peaks(
profile_id, session_started_at, session_down_peak, session_up_peak,
session_down_peak_at, session_up_peak_at, all_time_down_peak,
all_time_up_peak, all_time_down_peak_at, all_time_up_peak_at,
created_at, updated_at
) VALUES(?,?,?,?,?,?,?,?,?,?,?,?)
ON CONFLICT(profile_id) DO UPDATE SET
session_started_at=excluded.session_started_at,
session_down_peak=excluded.session_down_peak,
session_up_peak=excluded.session_up_peak,
session_down_peak_at=excluded.session_down_peak_at,
session_up_peak_at=excluded.session_up_peak_at,
all_time_down_peak=excluded.all_time_down_peak,
all_time_up_peak=excluded.all_time_up_peak,
all_time_down_peak_at=excluded.all_time_down_peak_at,
all_time_up_peak_at=excluded.all_time_up_peak_at,
updated_at=excluded.updated_at
""",
(
int(item["profile_id"]),
item["session_started_at"],
int(item["session_down_peak"]),
int(item["session_up_peak"]),
item.get("session_down_peak_at"),
item.get("session_up_peak_at"),
int(item["all_time_down_peak"]),
int(item["all_time_up_peak"]),
item.get("all_time_down_peak_at"),
item.get("all_time_up_peak_at"),
now,
now,
),
)
def _public(item: dict[str, Any]) -> dict[str, Any]:
# Note: The frontend receives both bytes/s and pre-formatted labels matching the existing speed displays.
return {
"session_started_at": item["session_started_at"],
"session": {
"down": int(item["session_down_peak"]),
"up": int(item["session_up_peak"]),
"down_h": human_rate(int(item["session_down_peak"])),
"up_h": human_rate(int(item["session_up_peak"])),
"down_at": item.get("session_down_peak_at"),
"up_at": item.get("session_up_peak_at"),
},
"all_time": {
"down": int(item["all_time_down_peak"]),
"up": int(item["all_time_up_peak"]),
"down_h": human_rate(int(item["all_time_down_peak"])),
"up_h": human_rate(int(item["all_time_up_peak"])),
"down_at": item.get("all_time_down_peak_at"),
"up_at": item.get("all_time_up_peak_at"),
},
}
def record(profile_id: int, down_rate: int = 0, up_rate: int = 0) -> dict[str, Any]:
# Note: The poller calls this function in the background; the database is updated only when a record is beaten.
load_cache()
down_rate = max(0, int(down_rate or 0))
up_rate = max(0, int(up_rate or 0))
measured_at = utcnow()
changed = False
with _LOCK:
item = _ensure_profile(int(profile_id))
if down_rate > int(item["session_down_peak"]):
item["session_down_peak"] = down_rate
item["session_down_peak_at"] = measured_at
changed = True
if up_rate > int(item["session_up_peak"]):
item["session_up_peak"] = up_rate
item["session_up_peak_at"] = measured_at
changed = True
if down_rate > int(item["all_time_down_peak"]):
item["all_time_down_peak"] = down_rate
item["all_time_down_peak_at"] = measured_at
changed = True
if up_rate > int(item["all_time_up_peak"]):
item["all_time_up_peak"] = up_rate
item["all_time_up_peak_at"] = measured_at
changed = True
result = _public(item)
if changed:
_persist(item)
return result
def current(profile_id: int) -> dict[str, Any]:
# Note: The REST API can show the last known records without forcing a new measurement.
load_cache()
with _LOCK:
return _public(_ensure_profile(int(profile_id)))
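# Minimal usage sketch, assuming an initialized database and an existing profile id:
# the poller records rates on every tick and the REST layer reads the cached peaks back.
def _example_speed_peaks(profile_id: int = 1) -> dict[str, Any]:
    record(profile_id, down_rate=5_242_880, up_rate=1_048_576)  # 5 MiB/s down, 1 MiB/s up
    return current(profile_id)["session"]  # no new measurement, just the cached session peak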

View File

@@ -0,0 +1,440 @@
from __future__ import annotations
import json
import mimetypes
import re
import time
import threading
import ssl
import urllib.error
import urllib.parse
import urllib.request
from html.parser import HTMLParser
from pathlib import Path
from ..config import BASE_DIR
from ..db import connect, utcnow
TRACKER_CACHE_TTL_SECONDS = 7 * 24 * 60 * 60
FAVICON_CACHE_TTL_SECONDS = 7 * 24 * 60 * 60
TRACKER_SCAN_LIMIT = 80
FAVICON_DIR = BASE_DIR / "data" / "tracker_favicons"
PUBLIC_FAVICON_BASE = "/static/tracker_favicons"
_TRACKER_SCAN_LOCKS: dict[int, threading.Lock] = {}
_TRACKER_SCAN_LOCKS_GUARD = threading.Lock()
class _IconParser(HTMLParser):
def __init__(self):
super().__init__()
self.icons: list[str] = []
def handle_starttag(self, tag: str, attrs):
if tag.lower() != "link":
return
data = {str(k).lower(): str(v or "") for k, v in attrs}
rel = re.sub(r"\s+", " ", data.get("rel", "").lower()).strip()
href = data.get("href", "").strip()
if href and "icon" in rel:
self.icons.append(href)
def _now_epoch() -> float:
return time.time()
def tracker_domain(url: str) -> str:
raw = str(url or "").strip()
if not raw:
return ""
parsed = urllib.parse.urlparse(raw if "://" in raw else f"http://{raw}")
host = (parsed.hostname or "").lower().strip(".")
if host.startswith("www."):
host = host[4:]
return host
def _root_domain(domain: str) -> str:
parts = [p for p in str(domain or "").lower().strip(".").split(".") if p]
if len(parts) <= 2:
return ".".join(parts)
# Note: Tracker favicon discovery needs the real main site first; for t.pte.nu that is pte.nu, not t.pte.nu.
known_second_level_suffixes = {"co", "com", "net", "org", "gov", "edu", "ac"}
if len(parts[-1]) == 2 and parts[-2] in known_second_level_suffixes and len(parts) >= 3:
return ".".join(parts[-3:])
return ".".join(parts[-2:])
def _safe_filename(domain: str) -> str:
return re.sub(r"[^a-z0-9_.-]+", "_", domain.lower()).strip("._") or "tracker"
def _read_cached(profile_id: int, hashes: list[str], ttl: int) -> tuple[dict[str, list[dict]], set[str]]:
if not hashes:
return {}, set()
now = _now_epoch()
cached: dict[str, list[dict]] = {}
fresh: set[str] = set()
with connect() as conn:
for start in range(0, len(hashes), 900):
chunk = hashes[start:start + 900]
placeholders = ",".join("?" for _ in chunk)
rows = conn.execute(
f"SELECT torrent_hash, trackers_json, updated_epoch FROM tracker_summary_cache WHERE profile_id=? AND torrent_hash IN ({placeholders})",
(profile_id, *chunk),
).fetchall()
for row in rows:
h = str(row.get("torrent_hash") or "")
try:
items = json.loads(row.get("trackers_json") or "[]")
except Exception:
items = []
cached[h] = items if isinstance(items, list) else []
if now - float(row.get("updated_epoch") or 0) < ttl:
fresh.add(h)
return cached, fresh
def _store(profile_id: int, torrent_hash: str, trackers: list[dict]) -> None:
now = utcnow()
epoch = _now_epoch()
compact = []
seen = set()
for item in trackers:
domain = tracker_domain(str(item.get("url") or item.get("domain") or "")) or str(item.get("domain") or "")
if not domain or domain in seen:
continue
seen.add(domain)
compact.append({"domain": domain, "url": str(item.get("url") or "")})
with connect() as conn:
conn.execute(
"""
INSERT INTO tracker_summary_cache(profile_id, torrent_hash, trackers_json, updated_at, updated_epoch)
VALUES(?, ?, ?, ?, ?)
ON CONFLICT(profile_id, torrent_hash) DO UPDATE SET
trackers_json=excluded.trackers_json,
updated_at=excluded.updated_at,
updated_epoch=excluded.updated_epoch
""",
(profile_id, torrent_hash, json.dumps(compact), now, epoch),
)
def summary(profile: dict, hashes: list[str], loader, scan_limit: int = TRACKER_SCAN_LIMIT, include_favicons: bool = False) -> dict:
"""Build tracker sidebar data from disk cache and refresh a small batch per request."""
# Note: Tracker data is cached per torrent hash, so huge rTorrent libraries are never scanned in one UI request.
profile_id = int(profile.get("id") or 0)
clean_hashes = [str(h or "").strip() for h in hashes if str(h or "").strip()]
cached, fresh = _read_cached(profile_id, clean_hashes, TRACKER_CACHE_TTL_SECONDS)
missing = [h for h in clean_hashes if h not in fresh]
errors: list[dict] = []
scanned_now = 0
for h in missing[:max(0, int(scan_limit or 0))]:
try:
trackers = loader(h)
_store(profile_id, h, trackers)
cached[h] = [{"domain": tracker_domain(t.get("url") or t.get("domain") or ""), "url": str(t.get("url") or "")} for t in trackers]
fresh.add(h)
scanned_now += 1
except Exception as exc:
errors.append({"hash": h, "error": str(exc)})
by_hash: dict[str, list[dict]] = {}
counts: dict[str, dict] = {}
for h in clean_hashes:
items = []
seen = set()
for item in cached.get(h, []):
domain = tracker_domain(str(item.get("url") or item.get("domain") or "")) or str(item.get("domain") or "")
if not domain or domain in seen:
continue
seen.add(domain)
row = {"domain": domain, "url": str(item.get("url") or "")}
items.append(row)
bucket = counts.setdefault(domain, {"domain": domain, "url": row["url"], "count": 0})
bucket["count"] += 1
if not bucket.get("url") and row["url"]:
bucket["url"] = row["url"]
by_hash[h] = items
trackers = sorted(counts.values(), key=lambda x: (-int(x.get("count") or 0), str(x.get("domain") or "")))
if include_favicons:
# Note: Summary returns only already cached static favicon URLs; network favicon discovery stays outside the hot tracker count path.
for item in trackers:
item["favicon_url"] = favicon_public_url(str(item.get("domain") or ""), enabled=True, create=False)
pending = max(0, len([h for h in clean_hashes if h not in fresh]))
return {"hashes": by_hash, "trackers": trackers, "errors": errors[:25], "scanned": len(clean_hashes), "scanned_now": scanned_now, "pending": pending, "cached": len(clean_hashes) - pending}
def _scan_lock(profile_id: int) -> threading.Lock:
with _TRACKER_SCAN_LOCKS_GUARD:
if profile_id not in _TRACKER_SCAN_LOCKS:
_TRACKER_SCAN_LOCKS[profile_id] = threading.Lock()
return _TRACKER_SCAN_LOCKS[profile_id]
def warm_summary_cache(profile: dict, hashes: list[str], loader, batch_size: int = TRACKER_SCAN_LIMIT) -> bool:
"""Start a non-blocking tracker cache warmup for large libraries."""
# Note: Tracker cache warming runs in one background thread per profile, so F5 returns cached data immediately instead of waiting for rTorrent scans.
profile_id = int(profile.get("id") or 0)
clean_hashes = [str(h or "").strip() for h in hashes if str(h or "").strip()]
if not profile_id or not clean_hashes:
return False
lock = _scan_lock(profile_id)
if lock.locked():
return False
def _worker():
if not lock.acquire(blocking=False):
return
try:
while True:
result = summary(profile, clean_hashes, loader, scan_limit=max(1, int(batch_size or TRACKER_SCAN_LIMIT)), include_favicons=False)
if int(result.get("pending") or 0) <= 0 or int(result.get("scanned_now") or 0) <= 0:
break
time.sleep(0.05)
finally:
lock.release()
threading.Thread(target=_worker, name=f"tracker-cache-warm-{profile_id}", daemon=True).start()
return True
def favicon_public_url(domain: str, enabled: bool = True, create: bool = False, force: bool = False) -> str:
"""Return the static URL for a cached tracker favicon, optionally creating or refreshing it first."""
# Note: Favicon files stay in data/tracker_favicons, but the browser loads them via the static/tracker_favicons symlink.
clean = tracker_domain(domain)
if not enabled or not clean:
return ""
if create:
favicon_path(clean, enabled=True, force=force)
cached = _cached_favicon(clean)
now = _now_epoch()
if not cached or now - float(cached.get("updated_epoch") or 0) >= FAVICON_CACHE_TTL_SECONDS:
return ""
path = Path(str(cached.get("file_path") or ""))
if not path.exists() or not path.is_file():
return ""
try:
rel = path.resolve().relative_to(FAVICON_DIR.resolve())
except Exception:
rel = Path(path.name)
return f"{PUBLIC_FAVICON_BASE}/{urllib.parse.quote(str(rel).replace(chr(92), '/'))}"
def _fetch(url: str, limit: int = 262144) -> tuple[bytes, str, str]:
# Note: Favicon discovery uses browser-like headers and a certificate fallback, because tracker login pages/CDNs often reject minimal Python requests.
req = urllib.request.Request(
url,
headers={
"User-Agent": "Mozilla/5.0 (compatible; pyTorrent favicon fetcher)",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/png,image/svg+xml,image/*,*/*;q=0.8",
"Connection": "close",
},
)
def _read(context=None):
with urllib.request.urlopen(req, timeout=8, context=context) as resp:
data = resp.read(limit + 1)
if len(data) > limit:
data = data[:limit]
content_type = str(resp.headers.get("Content-Type") or "").split(";", 1)[0].strip().lower()
final_url = str(resp.geturl() or url)
return data, content_type, final_url
try:
return _read()
except urllib.error.URLError as exc:
reason = getattr(exc, "reason", None)
if isinstance(reason, ssl.SSLError) or "CERTIFICATE_VERIFY_FAILED" in str(exc):
return _read(ssl._create_unverified_context())
raise
def _is_icon(data: bytes, content_type: str, url: str) -> bool:
"""Validate that downloaded bytes are a browser-readable image, not only an image-like HTTP header."""
# Note: Some trackers serve a broken /favicon.ico with image/vnd.microsoft.icon; pyTorrent now validates bytes before caching it.
if not data or len(data) < 16:
return False
head = data[:32]
lower = data[:512].lstrip().lower()
if head.startswith(b"\x00\x00\x01\x00") or head.startswith(b"\x00\x00\x02\x00"):
try:
count = int.from_bytes(data[4:6], "little")
except Exception:
count = 0
return 0 < count <= 256 and len(data) >= 6 + (16 * count)
if head.startswith(b"\x89PNG\r\n\x1a\n"):
return True
if head.startswith(b"\xff\xd8\xff"):
return True
if head.startswith((b"GIF87a", b"GIF89a")):
return True
if head.startswith(b"RIFF") and data[8:12] == b"WEBP":
return True
if lower.startswith(b"<svg") or b"<svg" in lower[:256]:
return True
ctype = content_type.lower()
if ctype in {"image/svg+xml"}:
return b"<svg" in lower[:512]
return False
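# Illustrative sketch: minimal byte headers accepted or rejected by the validation above.
def _example_is_icon() -> None:
    png = b"\x89PNG\r\n\x1a\n" + b"\x00" * 16
    assert _is_icon(png, "image/png", "https://example.org/favicon.png")
    # An HTML error page served with an image content type is rejected before caching.
    assert not _is_icon(b"<html><body>Not Found</body></html>", "image/x-icon", "https://example.org/favicon.ico")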
def _attr_value(tag: str, name: str) -> str:
# Note: Accept quoted and unquoted HTML attributes so favicon discovery works with compact/minified tracker pages.
match = re.search(rf"\b{name}\s*=\s*(['\"])(.*?)\1", tag, re.I | re.S)
if match:
return match.group(2).strip()
match = re.search(rf"\b{name}\s*=\s*([^\s>]+)", tag, re.I | re.S)
return match.group(1).strip().strip("'\"") if match else ""
def _extract_icon_hrefs(html: str) -> list[str]:
# Note: Collect every <link rel=...icon... href=...> tag regardless of attribute order, including shortcut icon and relative CDN paths.
hrefs: list[str] = []
parser = _IconParser()
try:
parser.feed(html)
hrefs.extend(parser.icons)
except Exception:
pass
for match in re.finditer(r"<link\b[^>]*>", html, re.I | re.S):
tag = match.group(0)
rel = _attr_value(tag, "rel").lower()
href = _attr_value(tag, "href")
if href and "icon" in rel:
hrefs.append(href)
clean = []
seen = set()
for href in hrefs:
href = str(href or "").strip()
if href and href not in seen:
seen.add(href)
clean.append(href)
return clean
def _tracker_icon_hosts(domain: str) -> list[str]:
host = tracker_domain(domain)
root = _root_domain(host)
# Note: Direct favicon fallback checks the tracker host first, then the main domain.
return [h for h in dict.fromkeys([host, root]) if h]
def _tracker_html_hosts(domain: str) -> list[str]:
host = tracker_domain(domain)
root = _root_domain(host)
# Note: HTML discovery checks the main site first, because tracker announce hosts often return text/plain.
return [h for h in dict.fromkeys([root, host]) if h]
def _favicon_candidates(domain: str) -> list[str]:
candidates = []
for h in _tracker_icon_hosts(domain):
candidates.extend([f"https://{h}/favicon.ico", f"http://{h}/favicon.ico"])
return list(dict.fromkeys(candidates))
def _html_icon_candidates(domain: str, errors: list[str] | None = None) -> list[str]:
urls = []
for h in _tracker_html_hosts(domain):
for scheme in ("https", "http"):
base = f"{scheme}://{h}/"
try:
data, ctype, final_url = _fetch(base, limit=524288)
except Exception as exc:
if errors is not None:
errors.append(f"{base}: {exc}")
continue
lower = data[:4096].lower()
if "html" not in ctype and b"<html" not in lower and b"<link" not in data.lower():
if errors is not None:
errors.append(f"{base}: response is not html ({ctype or 'unknown content-type'})")
continue
html = data.decode("utf-8", errors="ignore")
for href in _extract_icon_hrefs(html):
urls.append(urllib.parse.urljoin(final_url, href))
return list(dict.fromkeys(urls))
def _cached_favicon(domain: str):
clean = tracker_domain(domain)
if not clean:
return None
with connect() as conn:
return conn.execute("SELECT * FROM tracker_favicon_cache WHERE domain=?", (clean,)).fetchone()
def favicon_cache_row(domain: str):
"""Note: Expose the favicon cache row for diagnostics without duplicating SQL in routes or CLI."""
return _cached_favicon(domain)
def favicon_path(domain: str, enabled: bool = True, force: bool = False) -> tuple[Path | None, str | None]:
clean = tracker_domain(domain)
if not enabled or not clean:
return None, None
cached = _cached_favicon(clean)
now = _now_epoch()
if cached and not force and now - float(cached.get("updated_epoch") or 0) < FAVICON_CACHE_TTL_SECONDS:
path = Path(str(cached.get("file_path") or ""))
mime = str(cached.get("mime_type") or mimetypes.guess_type(path.name)[0] or "image/x-icon")
if path.exists() and path.is_file():
try:
if _is_icon(path.read_bytes()[:524288], mime, str(cached.get("source_url") or path.name)):
return path, mime
except Exception:
pass
if cached.get("error"):
return None, None
# Note: Favicon lookup checks the main-domain HTML first, then tracker HTML, then direct /favicon.ico fallbacks.
FAVICON_DIR.mkdir(parents=True, exist_ok=True)
errors = []
candidates = _html_icon_candidates(clean, errors) + _favicon_candidates(clean)
candidates = list(dict.fromkeys(candidates))
idx = 0
while idx < len(candidates):
url = candidates[idx]
idx += 1
try:
data, ctype, final_url = _fetch(url, limit=524288)
if not _is_icon(data, ctype, final_url):
errors.append(f"{url}: invalid icon ({ctype or 'unknown content-type'}, {len(data)} bytes)")
continue
ext = Path(urllib.parse.urlparse(final_url).path).suffix.lower() or mimetypes.guess_extension(ctype) or ".ico"
if ext not in {".ico", ".png", ".jpg", ".jpeg", ".svg", ".webp"}:
ext = ".ico"
path = FAVICON_DIR / f"{_safe_filename(clean)}{ext}"
path.write_bytes(data)
mime = ctype if ctype.startswith("image/") else (mimetypes.guess_type(path.name)[0] or "image/x-icon")
with connect() as conn:
conn.execute(
"""
INSERT INTO tracker_favicon_cache(domain, source_url, file_path, mime_type, updated_at, updated_epoch, error)
VALUES(?, ?, ?, ?, ?, ?, NULL)
ON CONFLICT(domain) DO UPDATE SET
source_url=excluded.source_url,
file_path=excluded.file_path,
mime_type=excluded.mime_type,
updated_at=excluded.updated_at,
updated_epoch=excluded.updated_epoch,
error=NULL
""",
(clean, final_url, str(path), mime, utcnow(), now),
)
return path, mime
except Exception as exc:
errors.append(f"{url}: {exc}")
# HTML is checked once before direct /favicon.ico probes; do not guess cdn/static/www hosts unless HTML points there.
with connect() as conn:
conn.execute(
"""
INSERT INTO tracker_favicon_cache(domain, source_url, file_path, mime_type, updated_at, updated_epoch, error)
VALUES(?, '', '', '', ?, ?, ?)
ON CONFLICT(domain) DO UPDATE SET
updated_at=excluded.updated_at,
updated_epoch=excluded.updated_epoch,
error=excluded.error
""",
(clean, utcnow(), now, "; ".join(errors[-8:]) or "favicon not found"),
)
return None, None
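# Illustrative sketch, assuming network access and an initialized database: favicon_path()
# downloads and caches the icon once per TTL, favicon_public_url() maps it to the static URL.
def _example_favicon(domain: str = "pte.nu") -> str:
    path, mime = favicon_path(domain, enabled=True)
    if path is None:
        return ""  # lookup failed; the error is cached for the domain
    return favicon_public_url(domain, enabled=True)  # e.g. "/static/tracker_favicons/pte.nu.ico"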

View File

@@ -7,7 +7,7 @@ from ..config import POLL_INTERVAL
from .preferences import active_profile, get_profile from .preferences import active_profile, get_profile
from .torrent_cache import torrent_cache from .torrent_cache import torrent_cache
from .torrent_summary import cached_summary from .torrent_summary import cached_summary
from . import rtorrent, smart_queue, traffic_history, automation_rules, torrent_stats, auth from . import rtorrent, smart_queue, traffic_history, automation_rules, torrent_stats, auth, speed_peaks
def _profile_room(profile_id: int) -> str: def _profile_room(profile_id: int) -> str:
@@ -59,6 +59,8 @@ def register_socketio_handlers(socketio):
status["usage_available"] = True status["usage_available"] = True
status["profile_id"] = pid status["profile_id"] = pid
traffic_history.record(pid, status.get("down_rate", 0), status.get("up_rate", 0), status.get("total_down", 0), status.get("total_up", 0)) traffic_history.record(pid, status.get("down_rate", 0), status.get("up_rate", 0), status.get("total_down", 0), status.get("total_up", 0))
# Note: Peak DL/UL rates are tracked in the background alongside the existing poller and persisted only when a record is beaten.
status["speed_peaks"] = speed_peaks.record(pid, status.get("down_rate", 0), status.get("up_rate", 0))
_emit_profile(socketio, "system_stats", status, pid) _emit_profile(socketio, "system_stats", status, pid)
heartbeat["ok"] = True heartbeat["ok"] = True
except Exception as exc: except Exception as exc:
@@ -73,8 +75,8 @@ def register_socketio_handlers(socketio):
result = smart_queue.check(profile, force=False) result = smart_queue.check(profile, force=False)
if result.get("enabled"): if result.get("enabled"):
_emit_profile(socketio, "smart_queue_update", result, pid) _emit_profile(socketio, "smart_queue_update", result, pid)
if result.get("paused") or result.get("resumed") or result.get("resume_requested"): if result.get("stopped") or result.get("started") or result.get("start_requested") or result.get("paused") or result.get("resumed"):
# Note: After Smart Queue changes, refresh cache immediately so the Downloading list does not wait for the next poller cycle. # Note: After Smart Queue STOP/START changes, refresh cache immediately so the Downloading list does not wait for the next poller cycle.
queue_diff = torrent_cache.refresh(profile) queue_diff = torrent_cache.refresh(profile)
if queue_diff.get("ok"): if queue_diff.get("ok"):
_emit_profile(socketio, "torrent_patch", {**queue_diff, "summary": cached_summary(pid, torrent_cache.snapshot(pid), force=True)}, pid) _emit_profile(socketio, "torrent_patch", {**queue_diff, "summary": cached_summary(pid, torrent_cache.snapshot(pid), force=True)}, pid)

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,9 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64">
<rect x="14" y="20" width="36" height="30" rx="8" fill="#f8fafc" stroke="#0f172a" stroke-width="4"></rect>
<rect x="22" y="30" width="6" height="6" rx="3" fill="#0f172a"></rect>
<rect x="36" y="30" width="6" height="6" rx="3" fill="#0f172a"></rect>
<path d="M25 42h14" stroke="#0f172a" stroke-width="4" stroke-linecap="round"></path>
<path d="M32 20V10" stroke="#0f172a" stroke-width="4" stroke-linecap="round"></path>
<circle cx="32" cy="8" r="4" fill="#0f172a"></circle>
<path d="M14 34H8M56 34h-6" stroke="#0f172a" stroke-width="4" stroke-linecap="round"></path>
</svg>


View File

@@ -1,9 +1,10 @@
:root { :root {
--app-font-family: --app-font-family:
Inter, system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif; Inter, system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif;
--topbar: 50px; --ui-scale: 1;
--statusbar: 34px; --topbar: calc(50px * var(--ui-scale));
--sidebar: 270px; --statusbar: calc(34px * var(--ui-scale));
--sidebar: calc(270px * var(--ui-scale));
--torrent-progress-complete: #198754; --torrent-progress-complete: #198754;
} }
[data-bs-theme="dark"] { [data-bs-theme="dark"] {
@@ -49,13 +50,13 @@ body {
} }
body { body {
overflow: hidden; overflow: hidden;
font-size: 13px; font-size: calc(13px * var(--ui-scale));
padding: 8px; padding: calc(8px * var(--ui-scale));
background: #05070a; background: #05070a;
font-family: var(--app-font-family); font-family: var(--app-font-family);
} }
.app-shell { .app-shell {
height: calc(100vh - 16px); height: calc(100vh - (16px * var(--ui-scale)));
display: grid; display: grid;
grid-template-rows: var(--topbar) 1fr var(--statusbar); grid-template-rows: var(--topbar) 1fr var(--statusbar);
background: var(--bs-body-bg); background: var(--bs-body-bg);
@@ -126,6 +127,11 @@ body {
color: var(--bs-body-color); color: var(--bs-body-color);
font-weight: 700; font-weight: 700;
} }
.mobile-speed-stats span {
display: inline-flex;
align-items: center;
gap: 0.18rem;
}
.topbar .form-control, .topbar .form-control,
.topbar .form-select { .topbar .form-select {
height: 32px; height: 32px;
@@ -218,8 +224,9 @@ body {
display: grid; display: grid;
grid-template-columns: var(--sidebar) 1fr; grid-template-columns: var(--sidebar) 1fr;
} }
/* Note: Sidebar filters are denser so large tracker lists fit better on one screen. */
.sidebar { .sidebar {
padding: 0.65rem; padding: 0.5rem;
overflow: auto; overflow: auto;
background: rgba(var(--bs-secondary-bg-rgb), 0.9); background: rgba(var(--bs-secondary-bg-rgb), 0.9);
} }
@@ -227,10 +234,10 @@ body {
width: 100%; width: 100%;
display: grid; display: grid;
grid-template-columns: minmax(0, 1fr) auto; grid-template-columns: minmax(0, 1fr) auto;
gap: 0.15rem 0.55rem; gap: 0.1rem 0.45rem;
align-items: center; align-items: center;
margin-bottom: 0.2rem; margin-bottom: 0.12rem;
padding: 0.45rem 0.6rem; padding: 0.34rem 0.5rem;
border: 0; border: 0;
border-radius: 0.55rem; border-radius: 0.55rem;
background: transparent; background: transparent;
@@ -379,6 +386,28 @@ body {
.detail-table { .detail-table {
white-space: nowrap; white-space: nowrap;
} }
.responsive-table-wrap {
max-width: 100%;
overflow-x: auto;
border: 1px solid var(--bs-border-color);
border-radius: 0.6rem;
-webkit-overflow-scrolling: touch;
}
.responsive-table-wrap .detail-table {
margin-bottom: 0;
}
.smart-exclusions-table {
min-width: 680px;
}
.smart-history-table {
min-width: 760px;
table-layout: fixed;
}
.smart-history-table th,
.smart-history-table td {
overflow-wrap: anywhere;
white-space: normal;
}
.general-grid { .general-grid {
display: grid; display: grid;
grid-template-columns: repeat(3, minmax(0, 1fr)); grid-template-columns: repeat(3, minmax(0, 1fr));
@@ -413,6 +442,11 @@ body {
.statusbar b { .statusbar b {
color: var(--bs-body-color); color: var(--bs-body-color);
} }
.speed-peaks {
display: inline-flex;
align-items: center;
gap: 0.25rem;
}
.status-limit { .status-limit {
border: 1px solid var(--bs-border-color); border: 1px solid var(--bs-border-color);
background: rgba(var(--bs-secondary-bg-rgb), 0.9); background: rgba(var(--bs-secondary-bg-rgb), 0.9);
@@ -625,18 +659,6 @@ body {
:root { :root {
--topbar: 132px; --topbar: 132px;
} }
.toolbar-right {
width: 100%;
justify-content: flex-start;
flex-wrap: nowrap;
gap: 0.35rem;
}
.search {
flex: 1 1 0;
width: auto;
min-width: 0;
max-width: none;
}
.preset-grid { .preset-grid {
grid-template-columns: 1fr 1fr; grid-template-columns: 1fr 1fr;
} }
@@ -657,6 +679,35 @@ body {
font-weight: 700; font-weight: 700;
text-transform: uppercase; text-transform: uppercase;
} }
/* Note: Browser title speed preference uses a two-column switch layout, so text aligns with the switch. */
.browser-speed-pref {
display: grid;
grid-template-columns: auto minmax(0, 1fr);
align-items: center;
column-gap: 0.75rem;
row-gap: 0.2rem;
min-height: 58px;
margin: 0;
padding: 0.55rem 0.75rem;
border: 1px solid var(--bs-border-color);
border-radius: 0.65rem;
background: rgba(var(--bs-secondary-bg-rgb), 0.35);
}
.browser-speed-pref .form-check-input {
grid-row: 1 / span 2;
grid-column: 1;
float: none;
margin: 0;
}
.browser-speed-pref .form-check-label {
grid-column: 2;
line-height: 1.2;
}
.browser-speed-pref small {
grid-column: 2;
color: var(--bs-secondary-color);
line-height: 1.2;
}
@media (max-width: 640px) { @media (max-width: 640px) {
.preferences-grid { .preferences-grid {
grid-template-columns: 1fr; grid-template-columns: 1fr;
@@ -737,6 +788,7 @@ body {
} }
.mobile-actions { .mobile-actions {
display: flex; display: flex;
flex-wrap: wrap;
gap: 0.35rem; gap: 0.35rem;
margin-top: 0.45rem; margin-top: 0.45rem;
} }
@@ -763,7 +815,7 @@ body.mobile-mode #mobileList {
overflow: auto; overflow: auto;
position: relative; position: relative;
z-index: 2; z-index: 2;
padding-top: 5.2rem !important; padding-top: 7.1rem !important;
padding-bottom: 1rem; padding-bottom: 1rem;
} }
body.mobile-mode .content { body.mobile-mode .content {
@@ -790,6 +842,18 @@ body.mobile-mode .main-grid {
width: 110px; width: 110px;
white-space: nowrap; white-space: nowrap;
} }
.mobile-sort-row {
display: flex;
margin-top: 0.4rem;
justify-content: flex-end;
gap: 0.5rem;
}
.mobile-sort-row .btn {
width: 100%;
justify-content: center;
}
.hidden-col { .hidden-col {
display: none !important; display: none !important;
} }
@@ -811,15 +875,52 @@ body.mobile-mode .main-grid {
border-radius: 0.5rem; border-radius: 0.5rem;
background: var(--bs-body-bg); background: var(--bs-body-bg);
} }
.label-filters .label-filter { .label-filters .label-filter,
font-size: 0.82rem; .tracker-filters .tracker-filter {
padding: 0.34rem 0.5rem; font-size: 0.78rem;
margin-bottom: 0.15rem; margin-bottom: 0.08rem;
padding: 0.26rem 0.44rem;
} }
.label-filters .label-filter i { .label-filters .label-filter i,
.tracker-filters .tracker-filter i {
opacity: 0.75; opacity: 0.75;
margin-right: 0.25rem; margin-right: 0.25rem;
} }
.tracker-filters .tracker-filter span:first-child {
align-items: center;
display: inline-flex;
gap: 0.35rem;
min-width: 0;
}
.tracker-favicon {
border-radius: 0.2rem;
flex: 0 0 auto;
height: 14px;
object-fit: contain;
width: 14px;
}
.tracker-favicon:not(.d-none) + .tracker-fallback-icon {
display: none;
}
.tracker-filter-empty {
align-items: center;
color: var(--bs-secondary-color);
display: flex;
font-size: 0.76rem;
gap: 0.3rem;
padding: 0.2rem 0.44rem;
}
/* Note: Empty tracker state uses the same sidebar spacing as regular filter rows. */
.tracker-filter-empty .spinner-border-xs {
height: 0.65rem;
width: 0.65rem;
}
.column-manager { .column-manager {
display: grid; display: grid;
grid-template-columns: repeat(auto-fill, minmax(170px, 1fr)); grid-template-columns: repeat(auto-fill, minmax(170px, 1fr));
@@ -896,13 +997,6 @@ body.mobile-mode .mobile-card {
align-items: end; align-items: end;
justify-content: flex-start; justify-content: flex-start;
} }
#trafficHistoryChart {
width: 100%;
height: 420px;
border: 1px solid var(--bs-border-color);
border-radius: 0.75rem;
background: var(--bs-body-bg);
}
@media (max-width: 992px) { @media (max-width: 992px) {
.profile-form-grid { .profile-form-grid {
grid-template-columns: 1fr; grid-template-columns: 1fr;
@@ -1138,19 +1232,12 @@ body.mobile-mode .mobile-card {
} }
} }
@media (max-width: 640px) { @media (max-width: 640px) {
.toolbar-right {
flex-wrap: nowrap !important;
gap: 0.3rem !important;
}
.search {
min-width: 0 !important;
width: auto !important;
flex: 1 1 0 !important;
max-width: none !important;
}
.mobile-speed-stats { .mobile-speed-stats {
gap: 0.25rem; align-items: flex-start;
flex-direction: column;
gap: 0.08rem;
font-size: 0.66rem; font-size: 0.66rem;
line-height: 1.05;
} }
} }
@@ -1338,6 +1425,35 @@ body.mobile-mode .mobile-card {
white-space: normal; white-space: normal;
word-break: break-word; word-break: break-word;
} }
/* Note: Smart Queue stats are reusable because they are shown in App status. */
.automation-smart-stats {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(120px, 1fr));
gap: 0.5rem;
margin: 0.5rem 0 0.75rem;
}
.automation-smart-stat {
min-width: 0;
padding: 0.5rem 0.6rem;
border: 1px solid var(--bs-border-color);
border-radius: 0.6rem;
background: rgba(var(--bs-secondary-bg-rgb), 0.28);
}
.automation-smart-stat span,
.automation-smart-stat small {
display: block;
color: var(--bs-secondary-color);
font-size: 0.72rem;
line-height: 1.2;
}
.automation-smart-stat b {
display: block;
overflow: hidden;
font-size: 1rem;
line-height: 1.3;
text-overflow: ellipsis;
white-space: nowrap;
}
.automation-history-toolbar { .automation-history-toolbar {
display: flex; display: flex;
justify-content: flex-end; justify-content: flex-end;
@@ -1346,6 +1462,7 @@ body.mobile-mode .mobile-card {
/* Note: Automation history has fixed compact metadata columns and a flexible Actions column, so long JSON cannot overlap Time/Rule. */ /* Note: Automation history has fixed compact metadata columns and a flexible Actions column, so long JSON cannot overlap Time/Rule. */
.automation-history-table { .automation-history-table {
width: 100%; width: 100%;
min-width: 760px;
table-layout: fixed; table-layout: fixed;
white-space: normal; white-space: normal;
} }
@@ -1504,11 +1621,12 @@ body.mobile-mode .mobile-card {
} }
.mobile-filter-actions, .mobile-filter-actions,
.mobile-filter-select-row { .mobile-filter-select-row {
display: flex;
align-items: center; align-items: center;
display: flex;
gap: 0.35rem; gap: 0.35rem;
} }
.mobile-filter-actions { .mobile-filter-actions {
flex-wrap: wrap;
margin-bottom: 0.4rem; margin-bottom: 0.4rem;
} }
.mobile-filter-actions span { .mobile-filter-actions span {
@@ -1533,7 +1651,7 @@ body.mobile-mode .mobile-filter-bar {
display: block !important; display: block !important;
} }
#mobileList { #mobileList {
padding-top: 5.2rem !important; padding-top: 7.1rem !important;
} }
.topbar .badge { .topbar .badge {
width: 0.72rem; width: 0.72rem;
@@ -2065,16 +2183,20 @@ body.mobile-mode .mobile-filter-bar {
background: rgba(var(--bs-secondary-bg-rgb), 0.28); background: rgba(var(--bs-secondary-bg-rgb), 0.28);
} }
/* Note: Smart Queue switch resets Bootstrap's negative switch offset so it cannot overflow narrow frames. */
.smart-toggle-row .form-check { .smart-toggle-row .form-check {
display: flex; display: flex;
align-items: center; align-items: center;
justify-content: flex-end;
flex: 0 0 auto;
min-height: 0; min-height: 0;
margin: 0; margin: 0;
padding-left: 2.25rem; padding-left: 0;
} }
.smart-toggle-row .form-check-input { .smart-toggle-row .form-check-input {
margin-top: 0; margin-top: 0;
margin-left: 0;
} }
.smart-setting-row .form-check-label, .smart-setting-row .form-check-label,
@@ -2149,7 +2271,7 @@ body.mobile-mode .mobile-filter-bar {
} }
.smart-toggle-row .form-check { .smart-toggle-row .form-check {
padding-left: 0; justify-content: flex-start;
} }
} }
@@ -2204,6 +2326,34 @@ body.mobile-mode .mobile-filter-bar {
color: var(--bs-secondary-color); color: var(--bs-secondary-color);
} }
.about-summary-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
gap: 0.6rem;
}
.about-summary-grid div {
padding: 0.7rem;
border: 1px solid var(--bs-border-color);
border-radius: 0.75rem;
background: rgba(var(--bs-secondary-bg-rgb), 0.28);
}
.about-summary-grid b,
.about-summary-grid span {
display: block;
}
.about-summary-grid b {
margin-bottom: 0.2rem;
}
.about-summary-grid span {
color: var(--bs-secondary-color);
font-size: 0.82rem;
}
.about-list { .about-list {
display: grid; display: grid;
gap: 0.55rem; gap: 0.55rem;
@@ -2336,7 +2486,3 @@ body.mobile-mode .mobile-filter-bar {
white-space: normal; white-space: normal;
} }
.automation-history-scroll {
width: 100%;
overflow-x: auto;
}

View File

@@ -0,0 +1 @@
../../data/tracker_favicons

View File

@@ -4,6 +4,8 @@
<meta charset="utf-8"> <meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<title>pyTorrent {{ code }}</title> <title>pyTorrent {{ code }}</title>
<link rel="icon" href="/static/favicon.svg" type="image/svg+xml">
<link rel="shortcut icon" href="/static/favicon.svg" type="image/svg+xml">
<link href="{{ bootstrap_theme_url('default') }}" rel="stylesheet"> <link href="{{ bootstrap_theme_url('default') }}" rel="stylesheet">
<link href="{{ frontend_asset_url('fontawesome_css') }}" rel="stylesheet"> <link href="{{ frontend_asset_url('fontawesome_css') }}" rel="stylesheet">
<link href="{{ static_url('styles.css') }}" rel="stylesheet"> <link href="{{ static_url('styles.css') }}" rel="stylesheet">

File diff suppressed because one or more lines are too long

View File

@@ -4,6 +4,8 @@
<meta charset="utf-8"> <meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<title>pyTorrent login</title> <title>pyTorrent login</title>
<link rel="icon" href="/static/favicon.svg" type="image/svg+xml">
<link rel="shortcut icon" href="/static/favicon.svg" type="image/svg+xml">
<link href="{{ bootstrap_theme_url('default') }}" rel="stylesheet"> <link href="{{ bootstrap_theme_url('default') }}" rel="stylesheet">
<link href="{{ frontend_asset_url('fontawesome_css') }}" rel="stylesheet"> <link href="{{ frontend_asset_url('fontawesome_css') }}" rel="stylesheet">
<link href="{{ static_url('styles.css') }}" rel="stylesheet"> <link href="{{ static_url('styles.css') }}" rel="stylesheet">

View File

@@ -1,4 +1,3 @@
from pytorrent import create_app from pytorrent import create_app
# Note: Gunicorn imports this object; background Socket.IO tasks still start through create_app().
app = create_app() app = create_app()