commit 4fac1716a6a5297c14a847a9330fda9cfbad8190 Author: gru Date: Sun May 3 12:35:57 2026 +0100 first commit diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..08e48a8 --- /dev/null +++ b/.env.example @@ -0,0 +1,16 @@ +PYTORRENT_SECRET_KEY=change-me +PYTORRENT_DB_PATH=data/pytorrent.sqlite3 +PYTORRENT_HOST=0.0.0.0 +PYTORRENT_PORT=8090 +PYTORRENT_DEBUG=0 +PYTORRENT_POLL_INTERVAL=1.0 +PYTORRENT_WORKERS=16 +PYTORRENT_GEOIP_DB=data/GeoLite2-City.mmdb + +# Retention / Smart Queue +PYTORRENT_TRAFFIC_HISTORY_RETENTION_DAYS=90 +PYTORRENT_JOBS_RETENTION_DAYS=30 +PYTORRENT_SMART_QUEUE_HISTORY_RETENTION_DAYS=30 +PYTORRENT_LOG_RETENTION_DAYS=30 +PYTORRENT_SMART_QUEUE_LABEL="Smart Queue Paused" + diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4fbc654 --- /dev/null +++ b/.gitignore @@ -0,0 +1,39 @@ +# Python +__pycache__/ +*.py[cod] +*.pyo + +# Virtualenv +venv/ +.env +.venv + +# App data +*.log + +# OS +.DS_Store +Thumbs.db + +# IDE +.vscode/ +.idea/ + +# Tests / cache +.pytest_cache/ +.mypy_cache/ + +# Build +dist/ +build/ +*.egg-info/ + +storage/* +*.zip + +*.sqlite3-shm +*.sqlite3 +data/* +logs/* + +todo.txt diff --git a/README.md b/README.md new file mode 100644 index 0000000..8c04f70 --- /dev/null +++ b/README.md @@ -0,0 +1,74 @@ +# pyTorrent + +Monopage web UI dla rTorrent inspirowany workflow ruTorrent. + +## Funkcje + +- Flask + Flask-SocketIO. +- SQLite na preferencje, profile SCGI, motyw Bootstrapa i font UI. +- Dowolna liczba profili rTorrent per user. +- Profile można dodawać i edytować z UI; flaga zdalnej lokalizacji ukrywa CPU/RAM hosta aplikacji, żeby nie mylić ich z zasobami zdalnego rTorrenta; publiczny IP dla port check jest dalej sprawdzany zdalnie, jeśli rTorrent to obsługuje. +- Przełączanie aktywnego rTorrent z UI. +- Live lista torrentów przez WebSocket. +- Cache aplikacyjny i wysyłanie patchy bez przeładowywania całej tabeli. +- Operacje usera wykonywane w ThreadPoolExecutor. 
+- Akcje `move` i `remove` są wykonywane per profil w kolejności zlecenia, więc późniejsze usunięcie poczeka na wcześniejsze przenoszenia. +- Log jobsów pokazuje krótką datę i godzinę w tabeli oraz pełny timestamp w tooltipie. +- Masowe start/pause/stop/resume/recheck/remove/move. +- Move obsługuje `move_data=true`, który fizycznie przenosi dane po stronie rTorrent w tle i odpytuje plik statusu, dzięki czemu długie `mv` nie kończy się timeoutem SCGI; jeśli cel już istnieje, jest nadpisywany (`force`), a timeouty z `mkdir`/startu/pollingu move nie przerywają operacji. Potem aktualizuje katalog torrenta, a `recheck` domyślnie włącza się przy fizycznym przenoszeniu. +- Modal dodawania wielu magnetów. +- Dolny status bar: CPU, RAM, wersja rTorrent, prędkości, limity, total DL/UP oraz status portu, gdy port check jest włączony. +- Prawoklik na torrentach. +- Skróty klawiaturowe. +- Szczegóły: General, Files, Peers, Trackers, Log. +- Smart Queue pokazuje domyślnie 10 ostatnich operacji; można rozwinąć historię do 100 wpisów. +- GeoIP peerów z MaxMind GeoLite2-City.mmdb, z cache IP. +- Cache-busting statyków przez MD5 i nagłówki cache. +- Preferencje wyglądu: domyślny Bootstrap albo Bootswatch: Flatly, Litera, Lumen, Minty, Sketchy, Solar, Spacelab, United, Zephyr. +- Preferencje fontu: domyślny font motywu, Adwaita Mono oraz dodatkowe pasujące fonty. + +## Uruchomienie + +```bash +./install.sh +. venv/bin/activate +python app.py +``` + +Domyślnie: `http://127.0.0.1:8090`. + +## Profil SCGI + +Przykład: + +```txt +scgi://127.0.0.1:5000/RPC2 +``` + +Po stronie rTorrent: + +```txt +network.scgi.open_port = 127.0.0.1:5000 +``` + +## GeoIP + +Instalator pobiera bazę GeoLite2-City jednorazowo do: + +```txt +data/GeoLite2-City.mmdb +``` + +Można też uruchomić ręcznie: + +```bash +./scripts/download_geoip.sh +``` + +Skrypt używa głównego źródła `https://git.io/GeoLite2-City.mmdb`, a przy błędzie fallbacku `https://github.com/P3TERX/GeoLite.mmdb/raw/download/GeoLite2-City.mmdb`. 
Katalog `data` ma uprawnienia `755`, a plik bazy `644`. + +## API docs + +Dokumentacja OpenAPI jest dostępna pod `/docs`. Endpoint `/api/profiles` obsługuje `max_parallel_jobs` z domyślną wartością `5` oraz `is_remote`; `PUT /api/profiles/{profile_id}` edytuje istniejący profil. Endpoint `/api/preferences` obsługuje m.in. `theme`, `bootstrap_theme`, `font_family`, `table_columns_json`, `peers_refresh_seconds` i `port_check_enabled`. Endpoint `/api/port-check` zwraca status portu wraz z `checked_at`; dla zdalnego profilu publiczny IP jest pobierany przez rTorrent z fallbackami `ifconfig.co`, `ifconfig.me` i `ipapi.linuxiarz.pl`, jeśli dana konfiguracja rTorrenta wspiera zdalne polecenia, a metoda `POST` wymusza ponowny check z pominięciem cache. Endpoint `/api/system/status` dla zdalnego profilu zwraca `usage_available=false` i nie odczytuje CPU/RAM. + +`/api/openapi.json` zawiera reusable schemas dla głównych odpowiedzi API, w tym `TorrentListResponse`, `TorrentSummary`, `TorrentFilterSummary`, `CleanupSummary` i `AppStatus`. `GET /api/torrents` dokumentuje teraz pole `summary` używane przez sidebar filters. 
diff --git a/app.py b/app.py new file mode 100644 index 0000000..b1fba1f --- /dev/null +++ b/app.py @@ -0,0 +1,7 @@ +from pytorrent import create_app, socketio +from pytorrent.config import HOST, PORT, DEBUG + +app = create_app() + +if __name__ == "__main__": + socketio.run(app, host=HOST, port=PORT, debug=DEBUG, allow_unsafe_werkzeug=True) diff --git a/deploy/pytorrent.service b/deploy/pytorrent.service new file mode 100644 index 0000000..5692d50 --- /dev/null +++ b/deploy/pytorrent.service @@ -0,0 +1,29 @@ +# useradd --system --home /opt/pyTorrent --shell /usr/sbin/nologin pytorrent +# chown -R pytorrent:pytorrent /opt/pyTorrent + +[Unit] +Description=pyTorrent Web UI +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +#User=root +#Group=root +User=pytorrent +Group=pytorrent +WorkingDirectory=/opt/pyTorrent +Environment="PYTHONUNBUFFERED=1" +EnvironmentFile=/opt/pyTorrent/.env +ExecStart=/opt/pyTorrent/venv/bin/python /opt/pyTorrent/app.py +Restart=always +RestartSec=3 +KillSignal=SIGINT +TimeoutStopSec=20 + +# opcjonalnie +NoNewPrivileges=true +PrivateTmp=true + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/install.sh b/install.sh new file mode 100755 index 0000000..3591a24 --- /dev/null +++ b/install.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -euo pipefail +python3 -m venv venv +. venv/bin/activate +pip install --upgrade pip +pip install -r requirements.txt +cp -n .env.example .env || true +mkdir -p data +chmod 755 data +./scripts/download_geoip.sh data/GeoLite2-City.mmdb +python -c "from pytorrent.db import init_db; init_db(); print(\"SQLite initialized\")" +echo "Run: . 
venv/bin/activate && python app.py" diff --git a/make_zip.py b/make_zip.py new file mode 100644 index 0000000..47d4687 --- /dev/null +++ b/make_zip.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +import os +import sys +import zipfile +import subprocess +from pathlib import Path + + +def run_git_command(args, repo_path: Path) -> bytes: + result = subprocess.run( + ["git", *args], + cwd=repo_path, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=True, + ) + return result.stdout + + +def get_files_to_archive(repo_path: Path) -> list[str]: + output = run_git_command( + ["ls-files", "--cached", "--others", "--exclude-standard", "-z"], + repo_path, + ) + files = output.decode("utf-8", errors="surrogateescape").split("\0") + return [f for f in files if f] + + +def make_zip(repo_path: Path, output_zip: Path) -> None: + files = get_files_to_archive(repo_path) + + output_zip = output_zip.resolve() + if output_zip.exists(): + output_zip.unlink() + + with zipfile.ZipFile(output_zip, "w", compression=zipfile.ZIP_DEFLATED) as zf: + for rel_path in files: + abs_path = repo_path / rel_path + + if not abs_path.exists(): + continue + + if abs_path.resolve() == output_zip: + continue + + zf.write(abs_path, arcname=rel_path) + + print(f"Utworzono archiwum: {output_zip}") + print(f"Dodano plików: {len(files)}") + + +def main(): + repo_path = Path.cwd() + + if len(sys.argv) > 1: + output_zip = Path(sys.argv[1]) + else: + output_zip = repo_path / f"{repo_path.name}.zip" + + try: + run_git_command(["rev-parse", "--show-toplevel"], repo_path) + except subprocess.CalledProcessError: + print("Błąd: ten katalog nie jest repozytorium Git.", file=sys.stderr) + sys.exit(1) + + make_zip(repo_path, output_zip) + + +if __name__ == "__main__": + main() diff --git a/pytorrent/__init__.py b/pytorrent/__init__.py new file mode 100644 index 0000000..c43cfc0 --- /dev/null +++ b/pytorrent/__init__.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from pathlib import Path +from flask 
import Flask, request, url_for +from flask_socketio import SocketIO +from .config import SECRET_KEY +from .db import init_db +from .utils import file_md5 + +socketio = SocketIO(cors_allowed_origins="*", ping_timeout=30, async_mode="threading") +_static_md5_cache: dict[tuple, str] = {} + + +def create_app() -> Flask: + app = Flask(__name__) + app.secret_key = SECRET_KEY + + @app.context_processor + def static_helpers(): + def static_url(filename: str) -> str: + path = Path(app.static_folder or "") / filename + try: + stat = path.stat() + key = (filename, stat.st_mtime_ns, stat.st_size) + version = _static_md5_cache.get(key) + if not version: + _static_md5_cache.clear() + version = file_md5(path) + _static_md5_cache[key] = version + return url_for("static", filename=filename, v=version) + except OSError: + return url_for("static", filename=filename) + return {"static_url": static_url} + + @app.after_request + def cache_headers(response): + response.headers.pop('Content-Disposition', None) + + if request.endpoint == "static": + response.headers["Cache-Control"] = "public, max-age=31536000, immutable" + else: + response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" + response.headers["Pragma"] = "no-cache" + response.headers["Expires"] = "0" + return response + + from .routes.main import bp as main_bp + from .routes.api import bp as api_bp + app.register_blueprint(main_bp) + app.register_blueprint(api_bp) + init_db() + + socketio.init_app(app) + from .services.workers import set_socketio + set_socketio(socketio) + from .services.websocket import register_socketio_handlers + register_socketio_handlers(socketio) + from .services.startup_config import schedule_startup_config_apply + schedule_startup_config_apply(socketio) + return app diff --git a/pytorrent/config.py b/pytorrent/config.py new file mode 100644 index 0000000..27a2587 --- /dev/null +++ b/pytorrent/config.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +import os +from pathlib 
import Path +from dotenv import load_dotenv + +BASE_DIR = Path(__file__).resolve().parent.parent +load_dotenv(BASE_DIR / ".env") + +SECRET_KEY = os.getenv("PYTORRENT_SECRET_KEY", "dev-change-me") +DB_PATH = Path(os.getenv("PYTORRENT_DB_PATH", str(BASE_DIR / "data" / "pytorrent.sqlite3"))) +if not DB_PATH.is_absolute(): + DB_PATH = BASE_DIR / DB_PATH + +HOST = os.getenv("PYTORRENT_HOST", "0.0.0.0") +PORT = int(os.getenv("PYTORRENT_PORT", "8090")) +DEBUG = os.getenv("PYTORRENT_DEBUG", "0") == "1" +POLL_INTERVAL = float(os.getenv("PYTORRENT_POLL_INTERVAL", "1.0")) +WORKERS = int(os.getenv("PYTORRENT_WORKERS", "16")) +GEOIP_DB = Path(os.getenv("PYTORRENT_GEOIP_DB", str(BASE_DIR / "data" / "GeoLite2-City.mmdb"))) +if not GEOIP_DB.is_absolute(): + GEOIP_DB = BASE_DIR / GEOIP_DB + + +def _env_int(name: str, default: int, minimum: int = 0) -> int: + try: + return max(minimum, int(os.getenv(name, str(default)))) + except (TypeError, ValueError): + return default + + +TRAFFIC_HISTORY_RETENTION_DAYS = _env_int("PYTORRENT_TRAFFIC_HISTORY_RETENTION_DAYS", 90, 1) +JOBS_RETENTION_DAYS = _env_int("PYTORRENT_JOBS_RETENTION_DAYS", 30, 1) +SMART_QUEUE_HISTORY_RETENTION_DAYS = _env_int("PYTORRENT_SMART_QUEUE_HISTORY_RETENTION_DAYS", 30, 1) +LOG_RETENTION_DAYS = _env_int("PYTORRENT_LOG_RETENTION_DAYS", 30, 1) +SMART_QUEUE_LABEL = os.getenv("PYTORRENT_SMART_QUEUE_LABEL", "Smart Queue Paused") diff --git a/pytorrent/db.py b/pytorrent/db.py new file mode 100644 index 0000000..2585655 --- /dev/null +++ b/pytorrent/db.py @@ -0,0 +1,301 @@ +from __future__ import annotations + +import sqlite3 +from contextlib import contextmanager +from datetime import datetime, timezone +from .config import DB_PATH + +SCHEMA = """ +CREATE TABLE IF NOT EXISTS users ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + username TEXT UNIQUE NOT NULL, + password_hash TEXT, + created_at TEXT NOT NULL +); + +CREATE TABLE IF NOT EXISTS user_preferences ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + 
theme TEXT DEFAULT 'dark', + bootstrap_theme TEXT DEFAULT 'default', + font_family TEXT DEFAULT 'default', + active_rtorrent_id INTEGER, + table_columns_json TEXT, + keyboard_json TEXT, + mobile_mode INTEGER DEFAULT 0, + peers_refresh_seconds INTEGER DEFAULT 0, + port_check_enabled INTEGER DEFAULT 0, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + FOREIGN KEY(user_id) REFERENCES users(id) +); + +CREATE TABLE IF NOT EXISTS rtorrent_profiles ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + name TEXT NOT NULL, + scgi_url TEXT NOT NULL, + is_default INTEGER DEFAULT 0, + timeout_seconds INTEGER DEFAULT 5, + max_parallel_jobs INTEGER DEFAULT 5, + is_remote INTEGER DEFAULT 0, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + FOREIGN KEY(user_id) REFERENCES users(id) +); + +CREATE TABLE IF NOT EXISTS jobs ( + id TEXT PRIMARY KEY, + user_id INTEGER NOT NULL, + profile_id INTEGER, + action TEXT NOT NULL, + payload_json TEXT, + status TEXT NOT NULL, + attempts INTEGER DEFAULT 0, + max_attempts INTEGER DEFAULT 2, + error TEXT, + result_json TEXT, + created_at TEXT NOT NULL, + started_at TEXT, + finished_at TEXT, + updated_at TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS idx_jobs_profile_status ON jobs(profile_id, status, created_at); + +CREATE TABLE IF NOT EXISTS labels ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + profile_id INTEGER, + name TEXT NOT NULL, + color TEXT DEFAULT '#64748b', + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + UNIQUE(user_id, profile_id, name) +); + +CREATE TABLE IF NOT EXISTS ratio_groups ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + profile_id INTEGER, + name TEXT NOT NULL, + min_ratio REAL DEFAULT 1.0, + max_ratio REAL DEFAULT 2.0, + seed_time_minutes INTEGER DEFAULT 0, + action TEXT DEFAULT 'stop', + enabled INTEGER DEFAULT 1, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + UNIQUE(user_id, profile_id, name) +); + +CREATE TABLE IF NOT 
EXISTS rss_feeds ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + profile_id INTEGER, + name TEXT NOT NULL, + url TEXT NOT NULL, + enabled INTEGER DEFAULT 1, + last_error TEXT, + last_checked_at TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL +); + +CREATE TABLE IF NOT EXISTS rss_rules ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + profile_id INTEGER, + name TEXT NOT NULL, + pattern TEXT NOT NULL, + save_path TEXT, + label TEXT, + start INTEGER DEFAULT 1, + enabled INTEGER DEFAULT 1, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL +); + + +CREATE TABLE IF NOT EXISTS smart_queue_settings ( + user_id INTEGER NOT NULL, + profile_id INTEGER NOT NULL, + enabled INTEGER DEFAULT 0, + max_active_downloads INTEGER DEFAULT 5, + stalled_seconds INTEGER DEFAULT 300, + min_speed_bytes INTEGER DEFAULT 1024, + min_seeds INTEGER DEFAULT 1, + updated_at TEXT NOT NULL, + PRIMARY KEY(user_id, profile_id) +); + +CREATE TABLE IF NOT EXISTS smart_queue_stalled ( + profile_id INTEGER NOT NULL, + torrent_hash TEXT NOT NULL, + first_stalled_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + PRIMARY KEY(profile_id, torrent_hash) +); + +CREATE TABLE IF NOT EXISTS smart_queue_exclusions ( + user_id INTEGER NOT NULL, + profile_id INTEGER NOT NULL, + torrent_hash TEXT NOT NULL, + reason TEXT, + created_at TEXT NOT NULL, + PRIMARY KEY(user_id, profile_id, torrent_hash) +); + +CREATE TABLE IF NOT EXISTS smart_queue_history ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + profile_id INTEGER NOT NULL, + event TEXT NOT NULL, + paused_count INTEGER DEFAULT 0, + resumed_count INTEGER DEFAULT 0, + checked_count INTEGER DEFAULT 0, + details_json TEXT, + created_at TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS idx_smart_queue_history_profile_created ON smart_queue_history(profile_id, created_at); + +CREATE TABLE IF NOT EXISTS smart_queue_auto_labels ( + profile_id INTEGER NOT NULL, + torrent_hash TEXT NOT NULL, + 
previous_label TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + PRIMARY KEY(profile_id, torrent_hash) +); + +CREATE TABLE IF NOT EXISTS traffic_history ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + profile_id INTEGER NOT NULL, + down_rate INTEGER DEFAULT 0, + up_rate INTEGER DEFAULT 0, + total_down INTEGER DEFAULT 0, + total_up INTEGER DEFAULT 0, + created_at TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS idx_traffic_history_profile_created ON traffic_history(profile_id, created_at); + +CREATE TABLE IF NOT EXISTS automation_rules ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + profile_id INTEGER, + name TEXT NOT NULL, + enabled INTEGER DEFAULT 1, + conditions_json TEXT NOT NULL, + effects_json TEXT NOT NULL, + cooldown_minutes INTEGER DEFAULT 60, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL +); +CREATE INDEX IF NOT EXISTS idx_automation_rules_profile_enabled ON automation_rules(profile_id, enabled); +CREATE TABLE IF NOT EXISTS automation_rule_state ( + rule_id INTEGER NOT NULL, + profile_id INTEGER NOT NULL, + torrent_hash TEXT NOT NULL, + condition_since_at TEXT, + last_matched_at TEXT, + last_applied_at TEXT, + updated_at TEXT NOT NULL, + PRIMARY KEY(rule_id, profile_id, torrent_hash) +); +CREATE TABLE IF NOT EXISTS automation_history ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + profile_id INTEGER NOT NULL, + rule_id INTEGER, + torrent_hash TEXT, + torrent_name TEXT, + rule_name TEXT, + actions_json TEXT, + created_at TEXT NOT NULL +); +CREATE INDEX IF NOT EXISTS idx_automation_history_profile_created ON automation_history(profile_id, created_at); + +CREATE TABLE IF NOT EXISTS rtorrent_config_overrides ( + user_id INTEGER NOT NULL, + profile_id INTEGER NOT NULL, + key TEXT NOT NULL, + value TEXT, + baseline_value TEXT, + apply_on_start INTEGER DEFAULT 0, + updated_at TEXT NOT NULL, + PRIMARY KEY(user_id, profile_id, key) +); +CREATE INDEX IF NOT EXISTS idx_rtorrent_config_overrides_profile 
ON rtorrent_config_overrides(profile_id, apply_on_start); + +CREATE TABLE IF NOT EXISTS app_settings ( + key TEXT PRIMARY KEY, + value TEXT +); +""" + +MIGRATIONS = [ + "ALTER TABLE user_preferences ADD COLUMN mobile_mode INTEGER DEFAULT 0", + "ALTER TABLE user_preferences ADD COLUMN peers_refresh_seconds INTEGER DEFAULT 0", + "ALTER TABLE user_preferences ADD COLUMN port_check_enabled INTEGER DEFAULT 0", + "ALTER TABLE user_preferences ADD COLUMN bootstrap_theme TEXT DEFAULT 'default'", + "ALTER TABLE user_preferences ADD COLUMN font_family TEXT DEFAULT 'default'", + "ALTER TABLE rtorrent_profiles ADD COLUMN max_parallel_jobs INTEGER DEFAULT 5", + "ALTER TABLE rtorrent_profiles ADD COLUMN is_remote INTEGER DEFAULT 0", + "ALTER TABLE jobs ADD COLUMN attempts INTEGER DEFAULT 0", + "ALTER TABLE jobs ADD COLUMN max_attempts INTEGER DEFAULT 2", + "ALTER TABLE jobs ADD COLUMN result_json TEXT", + "ALTER TABLE jobs ADD COLUMN started_at TEXT", + "ALTER TABLE jobs ADD COLUMN finished_at TEXT", + "ALTER TABLE automation_rules ADD COLUMN cooldown_minutes INTEGER DEFAULT 60", + "ALTER TABLE rtorrent_config_overrides ADD COLUMN apply_on_start INTEGER DEFAULT 0", + "ALTER TABLE rtorrent_config_overrides ADD COLUMN baseline_value TEXT", +] + + +def utcnow() -> str: + return datetime.now(timezone.utc).isoformat(timespec="seconds") + + +def dict_factory(cursor, row): + return {col[0]: row[idx] for idx, col in enumerate(cursor.description)} + + +@contextmanager +def connect(): + DB_PATH.parent.mkdir(parents=True, exist_ok=True) + conn = sqlite3.connect(DB_PATH, timeout=30) + conn.row_factory = dict_factory + conn.execute("PRAGMA foreign_keys = ON") + conn.execute("PRAGMA journal_mode = WAL") + try: + yield conn + conn.commit() + finally: + conn.close() + + +def init_db(): + with connect() as conn: + conn.executescript(SCHEMA) + for sql in MIGRATIONS: + try: + conn.execute(sql) + except sqlite3.OperationalError: + pass + now = utcnow() + conn.execute( + "INSERT OR IGNORE INTO 
users(id, username, password_hash, created_at) VALUES(1, 'default', NULL, ?)", + (now,), + ) + pref = conn.execute("SELECT id FROM user_preferences WHERE user_id=1").fetchone() + if not pref: + conn.execute( + "INSERT INTO user_preferences(user_id, theme, created_at, updated_at) VALUES(1, 'dark', ?, ?)", + (now, now), + ) + + +def default_user_id() -> int: + return 1 diff --git a/pytorrent/routes/api.py b/pytorrent/routes/api.py new file mode 100644 index 0000000..66d59fc --- /dev/null +++ b/pytorrent/routes/api.py @@ -0,0 +1,848 @@ +from __future__ import annotations + +import base64 +import os +import platform +import sys +import time +import re +from datetime import datetime, timezone +import urllib.request +import urllib.parse +import socket +import json +import psutil +import xml.etree.ElementTree as ET +from flask import Blueprint, jsonify, request +from ..config import DB_PATH, JOBS_RETENTION_DAYS, SMART_QUEUE_HISTORY_RETENTION_DAYS, WORKERS +from ..db import default_user_id, connect, utcnow +from ..services import preferences, rtorrent +from ..services.torrent_cache import torrent_cache +from ..services.torrent_summary import cached_summary +from ..services.workers import enqueue, list_jobs, cancel_job, retry_job, clear_jobs +from ..services.geoip import lookup_ip + +bp = Blueprint("api", __name__, url_prefix="/api") + + +def ok(payload=None): + data = {"ok": True} + if payload: + data.update(payload) + return jsonify(data) + + + +PORT_CHECK_CACHE_SECONDS = 6 * 60 * 60 + + +def _app_setting_get(key: str): + with connect() as conn: + row = conn.execute("SELECT value FROM app_settings WHERE key=?", (key,)).fetchone() + return row.get("value") if row else None + + +def _app_setting_set(key: str, value: str): + with connect() as conn: + conn.execute("INSERT OR REPLACE INTO app_settings(key,value) VALUES(?,?)", (key, value)) + + +def _iso_from_epoch(value) -> str | None: + try: + return datetime.fromtimestamp(float(value), 
timezone.utc).isoformat(timespec="seconds") + except Exception: + return None + + +def _public_ip(profile: dict | None = None, force: bool = False) -> str: + if profile and bool(profile.get("is_remote")): + return rtorrent.remote_public_ip(profile, force=force) + req = urllib.request.Request("https://api.ipify.org", headers={"User-Agent": "pyTorrent/port-check"}) + with urllib.request.urlopen(req, timeout=8) as res: + return res.read(64).decode("utf-8", "replace").strip() + + +def _incoming_port(profile: dict) -> int | None: + try: + value = str(rtorrent.client_for(profile).call("network.port_range") or "") + except Exception: + value = "" + match = re.search(r"(\d{2,5})", value) + if not match: + return None + port = int(match.group(1)) + return port if 1 <= port <= 65535 else None + + +def _yougetsignal_check(public_ip: str, port: int) -> dict: + body = urllib.parse.urlencode({"remoteAddress": public_ip, "portNumber": str(port)}).encode("utf-8") + req = urllib.request.Request( + "https://ports.yougetsignal.com/check-port.php", + data=body, + headers={ + "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8", + "User-Agent": "pyTorrent/port-check", + "Accept": "text/html,application/json,*/*", + }, + method="POST", + ) + with urllib.request.urlopen(req, timeout=12) as res: + text = res.read(8192).decode("utf-8", "replace") + low = text.lower() + if "is open" in low: + return {"status": "open", "source": "yougetsignal", "raw": text[:500]} + if "is closed" in low: + return {"status": "closed", "source": "yougetsignal", "raw": text[:500]} + return {"status": "unknown", "source": "yougetsignal", "raw": text[:500]} + + +def _local_port_fallback(public_ip: str, port: int) -> dict: + try: + with socket.create_connection((public_ip, port), timeout=3): + return {"status": "open", "source": "local-fallback"} + except Exception as exc: + return {"status": "unknown", "source": "local-fallback", "error": f"Local fallback inconclusive: {exc}"} + + +def 
port_check_status(force: bool = False) -> dict: + profile = preferences.active_profile() + prefs = preferences.get_preferences() + enabled = bool((prefs or {}).get("port_check_enabled")) + if not profile: + return {"status": "unknown", "enabled": enabled, "error": "No profile"} + port = _incoming_port(profile) + if not port: + return {"status": "unknown", "enabled": enabled, "error": "Cannot read rTorrent network.port_range"} + cache_key = f"port_check:{profile['id']}:{port}" + if not force: + cached = _app_setting_get(cache_key) + if cached: + try: + data = json.loads(cached) + if time.time() - float(data.get("checked_at_epoch") or 0) < PORT_CHECK_CACHE_SECONDS: + data["cached"] = True + data["enabled"] = enabled + if not data.get("checked_at"): + data["checked_at"] = _iso_from_epoch(data.get("checked_at_epoch")) + return data + except Exception: + pass + checked_at_epoch = time.time() + result = {"status": "unknown", "enabled": enabled, "port": port, "checked_at_epoch": checked_at_epoch, "checked_at": _iso_from_epoch(checked_at_epoch), "cached": False} + try: + public_ip = _public_ip(profile, force=force) + result["public_ip"] = public_ip + result["remote"] = bool(profile.get("is_remote")) + result.update(_yougetsignal_check(public_ip, port)) + except Exception as exc: + result["error"] = f"YouGetSignal failed: {exc}" + try: + public_ip = result.get("public_ip") or _public_ip(profile, force=force) + result["public_ip"] = public_ip + result["remote"] = bool(profile.get("is_remote")) + result.update(_local_port_fallback(public_ip, port)) + except Exception as fallback_exc: + result["fallback_error"] = str(fallback_exc) + result["source"] = "none" + _app_setting_set(cache_key, json.dumps(result)) + return result + + + + + + +def _safe_len(callable_obj) -> int | None: + try: + return len(callable_obj()) + except Exception: + return None + +def _table_count(table: str, where: str = "", params: tuple = ()) -> int: + with connect() as conn: + exists = 
conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table,)).fetchone() + if not exists: + return 0 + row = conn.execute(f"SELECT COUNT(*) AS n FROM {table} {where}", params).fetchone() + return int((row or {}).get("n") or 0) + + +def _db_size() -> dict: + try: + size = DB_PATH.stat().st_size if DB_PATH.exists() else 0 + return {"path": str(DB_PATH), "size": size, "size_h": rtorrent.human_size(size)} + except Exception as exc: + return {"path": str(DB_PATH), "size": 0, "size_h": "0 B", "error": str(exc)} + + +def cleanup_summary() -> dict: + return { + "jobs_total": _table_count("jobs"), + "jobs_clearable": _table_count("jobs", "WHERE status NOT IN ('pending', 'running')"), + "smart_queue_history_total": _table_count("smart_queue_history"), + "retention_days": { + "jobs": JOBS_RETENTION_DAYS, + "smart_queue_history": SMART_QUEUE_HISTORY_RETENTION_DAYS, + }, + "database": _db_size(), + } + +def active_default_download_path(profile: dict | None) -> str: + if not profile: + return "" + try: + return rtorrent.default_download_path(profile) + except Exception: + return "" + + +def enrich_bulk_payload(profile: dict, action_name: str, data: dict) -> dict: + payload = dict(data or {}) + hashes = payload.get("hashes") or [] + if isinstance(hashes, str): + hashes = [hashes] + hashes = [str(h) for h in hashes if h] + payload["hashes"] = hashes + payload["job_context"] = { + "source": "api", + "action": action_name, + "bulk": len(hashes) > 1, + "hash_count": len(hashes), + "requested_at": utcnow(), + } + if hashes: + try: + by_hash = {str(t.get("hash")): t for t in torrent_cache.snapshot(profile["id"])} + payload["job_context"]["items"] = [ + { + "hash": h, + "name": str((by_hash.get(h) or {}).get("name") or ""), + "path": str((by_hash.get(h) or {}).get("path") or ""), + } + for h in hashes + ] + except Exception as exc: + payload["job_context"]["items_error"] = str(exc) + if action_name == "move": + payload["job_context"]["target_path"] = 
str(payload.get("path") or "") +        payload["job_context"]["move_data"] = bool(payload.get("move_data")) +    if action_name == "remove": +        payload["job_context"]["remove_data"] = bool(payload.get("remove_data")) +    return payload + + +@bp.get("/profiles") +def profiles_list(): +    return ok({"profiles": preferences.list_profiles(), "active": preferences.active_profile()}) + + +@bp.post("/profiles") +def profiles_create(): +    try: +        return ok({"profile": preferences.save_profile(request.json or {})}) +    except Exception as exc: +        return jsonify({"ok": False, "error": str(exc)}), 400 + + +@bp.put("/profiles/<int:profile_id>") +def profiles_update(profile_id: int): +    try: +        return ok({"profile": preferences.update_profile(profile_id, request.json or {})}) +    except Exception as exc: +        return jsonify({"ok": False, "error": str(exc)}), 400 + + +@bp.delete("/profiles/<int:profile_id>") +def profiles_delete(profile_id: int): +    preferences.delete_profile(profile_id) +    return ok({"profiles": preferences.list_profiles(), "active": preferences.active_profile()}) + + +@bp.post("/profiles/<int:profile_id>/activate") +def profiles_activate(profile_id: int): +    try: +        return ok({"profile": preferences.activate_profile(profile_id)}) +    except Exception as exc: +        return jsonify({"ok": False, "error": str(exc)}), 404 + + +@bp.get("/preferences") +def prefs_get(): +    return ok({"preferences": preferences.get_preferences()}) + + +@bp.post("/preferences") +def prefs_save(): +    return ok({"preferences": preferences.save_preferences(request.json or {})}) + + +@bp.get("/torrents") +def torrents(): +    profile = preferences.active_profile() +    if not profile: +        return ok({"torrents": [], "summary": cached_summary(0, []), "error": "No rTorrent profile"}) +    rows = torrent_cache.snapshot(profile["id"]) +    return ok({ +        "profile_id": profile["id"], +        "torrents": rows, +        "summary": cached_summary(profile["id"], rows), +        "error": torrent_cache.error(profile["id"]), +    }) + + +@bp.get("/torrents/<torrent_hash>/files") +def torrent_files(torrent_hash: str): +    profile = 
preferences.active_profile() +    if not profile: +        return jsonify({"ok": False, "error": "No profile"}), 400 +    return ok({"files": rtorrent.torrent_files(profile, torrent_hash)}) + + +@bp.post("/torrents/<torrent_hash>/files/priority") +def torrent_file_priority(torrent_hash: str): +    profile = preferences.active_profile() +    if not profile: +        return jsonify({"ok": False, "error": "No profile"}), 400 +    data = request.get_json(silent=True) or {} +    files = data.get("files") or [] +    if not isinstance(files, list) or not files: +        return jsonify({"ok": False, "error": "No files selected"}), 400 +    result = rtorrent.set_file_priorities(profile, torrent_hash, files) +    status = 207 if result.get("errors") else 200 +    return ok(result), status + + +@bp.get("/torrents/<torrent_hash>/peers") +def torrent_peers(torrent_hash: str): +    profile = preferences.active_profile() +    if not profile: +        return jsonify({"ok": False, "error": "No profile"}), 400 +    peers = rtorrent.torrent_peers(profile, torrent_hash) +    for peer in peers: +        peer.update(lookup_ip(peer.get("ip", ""))) +    return ok({"peers": peers}) + + +@bp.post("/torrents/<torrent_hash>/peers/action") +def torrent_peer_action(torrent_hash: str): +    profile = preferences.active_profile() +    if not profile: +        return jsonify({"ok": False, "error": "No profile"}), 400 +    data = request.get_json(silent=True) or {} +    try: +        result = rtorrent.peer_action(profile, torrent_hash, int(data.get("peer_index")), str(data.get("action") or "")) +        return ok({"result": result, "message": f"Peer {result['action']} via {result['method']}"}) +    except Exception as exc: +        return jsonify({"ok": False, "error": str(exc)}), 400 + + +@bp.get("/torrents/<torrent_hash>/trackers") +def torrent_trackers(torrent_hash: str): +    profile = preferences.active_profile() +    if not profile: +        return jsonify({"ok": False, "error": "No profile"}), 400 +    return ok({"trackers": rtorrent.torrent_trackers(profile, torrent_hash)}) + + +@bp.post("/torrents/<torrent_hash>/trackers/<action_name>") +def torrent_tracker_action(torrent_hash: str, action_name: str): 
profile = preferences.active_profile() + if not profile: + return jsonify({"ok": False, "error": "No profile"}), 400 + try: + result = rtorrent.tracker_action(profile, torrent_hash, action_name, request.get_json(silent=True) or {}) + return ok({"result": result, "message": f"Tracker {action_name} via {result.get('method', 'XMLRPC')}"}) + except Exception as exc: + return jsonify({"ok": False, "error": str(exc)}), 400 + + +@bp.post("/torrents/") +def torrent_action(action_name: str): + profile = preferences.active_profile() + if not profile: + return jsonify({"ok": False, "error": "No profile"}), 400 + data = request.get_json(silent=True) or {} + allowed = {"start", "pause", "stop", "resume", "recheck", "reannounce", "remove", "move", "set_label", "set_ratio_group"} + if action_name not in allowed: + return jsonify({"ok": False, "error": "Unknown action"}), 400 + payload = enrich_bulk_payload(profile, action_name, data) + job_id = enqueue(action_name, profile["id"], payload) + return ok({"job_id": job_id, "hash_count": len(payload.get("hashes") or []), "bulk": len(payload.get("hashes") or []) > 1}) + + +@bp.post("/torrents/add") +def torrent_add(): + profile = preferences.active_profile() + if not profile: + return jsonify({"ok": False, "error": "No profile"}), 400 + job_ids = [] + if request.content_type and request.content_type.startswith("multipart/form-data"): + start = request.form.get("start", "1") in {"1", "true", "on", "yes"} + directory = request.form.get("directory", "") or active_default_download_path(profile) + label = request.form.get("label", "") + uris = [x.strip() for x in request.form.get("uris", "").splitlines() if x.strip()] + for uri in uris: + job_ids.append(enqueue("add_magnet", profile["id"], {"uri": uri, "start": start, "directory": directory, "label": label})) + for uploaded in request.files.getlist("files"): + data_b64 = base64.b64encode(uploaded.read()).decode("ascii") + job_ids.append(enqueue("add_torrent_raw", profile["id"], {"filename": 
uploaded.filename, "data_b64": data_b64, "start": start, "directory": directory, "label": label})) + return ok({"job_ids": job_ids}) + data = request.get_json(silent=True) or {} + uris = data.get("uris") or [] + if isinstance(uris, str): + uris = [x.strip() for x in uris.splitlines() if x.strip()] + for uri in uris: + job_ids.append(enqueue("add_magnet", profile["id"], {"uri": uri, "start": data.get("start", True), "directory": data.get("directory", "") or active_default_download_path(profile), "label": data.get("label", "")})) + return ok({"job_ids": job_ids}) + + +@bp.post("/speed/limits") +def speed_limits(): + profile = preferences.active_profile() + if not profile: + return jsonify({"ok": False, "error": "No profile"}), 400 + data = request.get_json(silent=True) or {} + job_id = enqueue("set_limits", profile["id"], {"down": data.get("down"), "up": data.get("up")}) + return ok({"job_id": job_id}) + + +@bp.get("/system/status") +def system_status(): + profile = preferences.active_profile() + if not profile: + return jsonify({"ok": False, "error": "No profile"}) + try: + status = rtorrent.system_status(profile) + if bool(profile.get("is_remote")): + status["usage_source"] = "remote-hidden" + status["usage_available"] = False + else: + status["cpu"] = psutil.cpu_percent(interval=None) + status["ram"] = psutil.virtual_memory().percent + status["usage_source"] = "local" + status["usage_available"] = True + return ok({"status": status}) + except Exception as exc: + return jsonify({"ok": False, "error": str(exc)}) + + +@bp.get("/app/status") +def app_status(): + started = time.perf_counter() + profile = preferences.active_profile() + proc = psutil.Process(os.getpid()) + try: + jobs = list_jobs(10, 0) + jobs_total = jobs.get("total", 0) + except Exception: + jobs_total = 0 + status = { + "pytorrent": { + "ok": True, + "pid": os.getpid(), + "uptime_seconds": round(time.time() - proc.create_time(), 1), + "memory_rss": proc.memory_info().rss, + "memory_rss_h": 
rtorrent.human_size(proc.memory_info().rss), + "threads": proc.num_threads(), + "cpu_percent": proc.cpu_percent(interval=None), + "jobs_total": jobs_total, + "python": platform.python_version(), + "platform": platform.platform(), + "executable": sys.executable, + "worker_threads": WORKERS, + "open_files": _safe_len(proc.open_files) if hasattr(proc, "open_files") else None, + "connections": _safe_len(lambda: proc.net_connections(kind="inet")) if hasattr(proc, "net_connections") else None, + }, + "cleanup": cleanup_summary(), + "profile": profile, + "scgi": None, + } + if profile: + try: + status["scgi"] = rtorrent.scgi_diagnostics(profile) + except Exception as exc: + status["scgi"] = {"ok": False, "error": str(exc), "url": profile.get("scgi_url")} + try: + prefs = preferences.get_preferences() + status["port_check"] = {"status": "disabled", "enabled": False} if not bool((prefs or {}).get("port_check_enabled")) else port_check_status(force=False) + except Exception as exc: + status["port_check"] = {"status": "error", "error": str(exc)} + status["api_ms"] = round((time.perf_counter() - started) * 1000, 2) + return ok({"status": status}) + + +@bp.get("/port-check") +def port_check_get(): + prefs = preferences.get_preferences() + if not bool((prefs or {}).get("port_check_enabled")): + return ok({"port_check": {"status": "disabled", "enabled": False}}) + return ok({"port_check": port_check_status(force=False)}) + + +@bp.post("/port-check") +def port_check_post(): + return ok({"port_check": port_check_status(force=True)}) + + +@bp.get("/jobs") +def jobs_list(): + limit = int(request.args.get("limit", 50)) + offset = int(request.args.get("offset", 0)) + data = list_jobs(limit, offset) + return ok({"jobs": data["rows"], "total": data["total"], "limit": data["limit"], "offset": data["offset"]}) + + +@bp.post("/jobs/clear") +def jobs_clear(): + deleted = clear_jobs() + return ok({"deleted": deleted}) + + +@bp.get("/cleanup/summary") +def cleanup_status(): + return 
ok({"cleanup": cleanup_summary()}) + + +@bp.post("/cleanup/jobs") +def cleanup_jobs(): + deleted = clear_jobs() + return ok({"deleted": deleted, "cleanup": cleanup_summary()}) + + +@bp.post("/cleanup/smart-queue") +def cleanup_smart_queue(): + with connect() as conn: + exists = conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='smart_queue_history'").fetchone() + if not exists: + deleted = 0 + else: + cur = conn.execute("DELETE FROM smart_queue_history") + deleted = int(cur.rowcount or 0) + return ok({"deleted": deleted, "cleanup": cleanup_summary()}) + + +@bp.post("/cleanup/all") +def cleanup_all(): + deleted_jobs = clear_jobs() + with connect() as conn: + exists = conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='smart_queue_history'").fetchone() + if not exists: + deleted_smart = 0 + else: + cur = conn.execute("DELETE FROM smart_queue_history") + deleted_smart = int(cur.rowcount or 0) + return ok({"deleted": {"jobs": deleted_jobs, "smart_queue_history": deleted_smart}, "cleanup": cleanup_summary()}) + + +@bp.post("/jobs//cancel") +def jobs_cancel(job_id: str): + if not cancel_job(job_id): + return jsonify({"ok": False, "error": "Only pending or failed jobs can be cancelled"}), 400 + return ok() + + +@bp.post("/jobs//retry") +def jobs_retry(job_id: str): + if not retry_job(job_id): + return jsonify({"ok": False, "error": "Only failed or cancelled jobs can be retried"}), 400 + return ok() + + +@bp.get("/path/default") +def path_default(): + profile = preferences.active_profile() + if not profile: + return jsonify({"ok": False, "error": "No profile"}), 400 + try: + return ok({"path": rtorrent.default_download_path(profile)}) + except Exception as exc: + return jsonify({"ok": False, "error": str(exc)}), 400 + + +@bp.get("/path/browse") +def path_browse(): + profile = preferences.active_profile() + if not profile: + return jsonify({"ok": False, "error": "No profile"}), 400 + base = request.args.get("path") or "" + try: + 
return ok(rtorrent.browse_path(profile, base)) + except Exception as exc: + return jsonify({"ok": False, "error": str(exc)}), 400 + + +@bp.get("/labels") +def labels_list(): + profile = preferences.active_profile() + pid = profile["id"] if profile else None + with connect() as conn: + rows = conn.execute("SELECT * FROM labels WHERE user_id=? AND (profile_id=? OR profile_id IS NULL) ORDER BY name COLLATE NOCASE", (default_user_id(), pid)).fetchall() + return ok({"labels": rows}) + + +@bp.post("/labels") +def labels_save(): + profile = preferences.active_profile() + if not profile: + return jsonify({"ok": False, "error": "No profile"}), 400 + data = request.get_json(silent=True) or {} + name = str(data.get("name") or "").strip() + if not name: + return jsonify({"ok": False, "error": "Missing label name"}), 400 + now = utcnow() + with connect() as conn: + conn.execute("INSERT OR IGNORE INTO labels(user_id,profile_id,name,color,created_at,updated_at) VALUES(?,?,?,?,?,?)", (default_user_id(), profile["id"], name, data.get("color") or "#64748b", now, now)) + return labels_list() + + +@bp.delete("/labels/") +def labels_delete(label_id: int): + profile = preferences.active_profile() + pid = profile["id"] if profile else None + with connect() as conn: + conn.execute("DELETE FROM labels WHERE id=? AND user_id=? AND (profile_id=? OR profile_id IS NULL)", (label_id, default_user_id(), pid)) + return labels_list() + + +@bp.get("/ratio-groups") +def ratio_groups_list(): + profile = preferences.active_profile() + pid = profile["id"] if profile else None + with connect() as conn: + rows = conn.execute("SELECT * FROM ratio_groups WHERE user_id=? AND (profile_id=? 
OR profile_id IS NULL) ORDER BY name COLLATE NOCASE", (default_user_id(), pid)).fetchall() + return ok({"groups": rows}) + + +@bp.post("/ratio-groups") +def ratio_groups_save(): + profile = preferences.active_profile() + if not profile: + return jsonify({"ok": False, "error": "No profile"}), 400 + data = request.get_json(silent=True) or {} + name = str(data.get("name") or "").strip() + if not name: + return jsonify({"ok": False, "error": "Missing group name"}), 400 + now = utcnow() + with connect() as conn: + conn.execute("INSERT OR REPLACE INTO ratio_groups(user_id,profile_id,name,min_ratio,max_ratio,seed_time_minutes,action,enabled,created_at,updated_at) VALUES(?,?,?,?,?,?,?,?,?,?)", (default_user_id(), profile["id"], name, float(data.get("min_ratio") or 1), float(data.get("max_ratio") or 2), int(data.get("seed_time_minutes") or 0), data.get("action") or "stop", 1 if data.get("enabled", True) else 0, now, now)) + return ratio_groups_list() + + +@bp.get("/rss") +def rss_list(): + profile = preferences.active_profile() + pid = profile["id"] if profile else None + with connect() as conn: + feeds = conn.execute("SELECT * FROM rss_feeds WHERE user_id=? AND (profile_id=? OR profile_id IS NULL) ORDER BY name", (default_user_id(), pid)).fetchall() + rules = conn.execute("SELECT * FROM rss_rules WHERE user_id=? AND (profile_id=? 
OR profile_id IS NULL) ORDER BY name", (default_user_id(), pid)).fetchall() + return ok({"feeds": feeds, "rules": rules}) + + +@bp.post("/rss/feeds") +def rss_feed_save(): + profile = preferences.active_profile() + data = request.get_json(silent=True) or {} + now = utcnow() + with connect() as conn: + conn.execute("INSERT INTO rss_feeds(user_id,profile_id,name,url,enabled,created_at,updated_at) VALUES(?,?,?,?,?,?,?)", (default_user_id(), profile["id"] if profile else None, data.get("name") or "RSS", data.get("url") or "", 1, now, now)) + return rss_list() + + +@bp.post("/rss/rules") +def rss_rule_save(): + profile = preferences.active_profile() + data = request.get_json(silent=True) or {} + now = utcnow() + with connect() as conn: + conn.execute("INSERT INTO rss_rules(user_id,profile_id,name,pattern,save_path,label,start,enabled,created_at,updated_at) VALUES(?,?,?,?,?,?,?,?,?,?)", (default_user_id(), profile["id"] if profile else None, data.get("name") or "Rule", data.get("pattern") or ".*", data.get("save_path") or active_default_download_path(profile), data.get("label") or "", 1 if data.get("start", True) else 0, 1, now, now)) + return rss_list() + + +@bp.post("/rss/check") +def rss_check(): + profile = preferences.active_profile() + if not profile: + return jsonify({"ok": False, "error": "No profile"}), 400 + queued = 0 + with connect() as conn: + feeds = conn.execute("SELECT * FROM rss_feeds WHERE user_id=? AND profile_id=? AND enabled=1", (default_user_id(), profile["id"])).fetchall() + rules = conn.execute("SELECT * FROM rss_rules WHERE user_id=? AND profile_id=? 
AND enabled=1", (default_user_id(), profile["id"])).fetchall() + for feed in feeds: + try: + raw = urllib.request.urlopen(feed["url"], timeout=10).read(2_000_000) + root = ET.fromstring(raw) + for item in root.findall('.//item')[:100]: + title = item.findtext('title') or '' + link = item.findtext('link') or '' + enc = item.find('enclosure') + if enc is not None and enc.get('url'): + link = enc.get('url') or link + for rule in rules: + if re.search(rule["pattern"], title, re.I) and link: + enqueue("add_magnet", profile["id"], {"uri": link, "start": bool(rule["start"]), "directory": rule.get("save_path") or active_default_download_path(profile), "label": rule.get("label") or ""}) + queued += 1 + except Exception as exc: + with connect() as conn: + conn.execute("UPDATE rss_feeds SET last_error=?, last_checked_at=?, updated_at=? WHERE id=?", (str(exc), utcnow(), utcnow(), feed["id"])) + return ok({"queued": queued}) + + +@bp.get('/rtorrent-config') +def rtorrent_config_get(): + profile = preferences.active_profile() + if not profile: + return jsonify({'ok': False, 'error': 'No profile'}), 400 + try: + return ok({'config': rtorrent.get_config(profile)}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc)}), 500 + +@bp.post('/rtorrent-config') +def rtorrent_config_save(): + profile = preferences.active_profile() + if not profile: + return jsonify({'ok': False, 'error': 'No profile'}), 400 + try: + data = request.get_json(silent=True) or {} + result = rtorrent.set_config(profile, data.get('values') or {}, bool(data.get('apply_now', True)), bool(data.get('apply_on_start')), data.get('clear_keys') or []) + if not result.get('ok'): + return jsonify({'ok': False, 'error': 'Some settings were not saved', 'result': result}), 400 + return ok({'result': result}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc)}), 500 + + +@bp.post('/rtorrent-config/generate') +def rtorrent_config_generate(): + profile = 
preferences.active_profile() + if not profile: + return jsonify({'ok': False, 'error': 'No profile'}), 400 + try: + data = request.get_json(silent=True) or {} + return ok({'config_text': rtorrent.generate_config_text(data.get('values') or {})}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc)}), 500 + +@bp.get('/smart-queue') +def smart_queue_get(): + from ..services import smart_queue + profile = preferences.active_profile() + if not profile: + return ok({'settings': {}, 'exclusions': [], 'error': 'No profile'}) + try: + history_limit = max(1, min(int(request.args.get('history_limit', 10) or 10), 100)) + settings = smart_queue.get_settings(profile['id']) + exclusions = smart_queue.list_exclusions(profile['id']) + history = smart_queue.list_history(profile['id'], limit=history_limit) + history_total = smart_queue.count_history(profile['id']) + return ok({'settings': settings, 'exclusions': exclusions, 'history': history, 'history_total': history_total}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc), 'settings': {}, 'exclusions': []}) + + +@bp.post('/smart-queue') +def smart_queue_save(): + from ..services import smart_queue + profile = preferences.active_profile() + if not profile: + return ok({'settings': {}, 'error': 'No profile'}) + try: + payload = request.get_json(silent=True) or {} + return ok({'settings': smart_queue.save_settings(profile['id'], payload)}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc)}) + + +@bp.post('/smart-queue/check') +def smart_queue_check(): + from ..services import smart_queue + profile = preferences.active_profile() + if not profile: + return ok({'result': {'ok': False, 'error': 'No profile'}}) + try: + return ok({'result': smart_queue.check(profile, force=True)}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc)}), 500 + + +@bp.post('/smart-queue/exclusion') +def smart_queue_exclusion(): + from ..services import 
smart_queue + profile = preferences.active_profile() + if not profile: + return jsonify({'ok': False, 'error': 'No profile'}), 400 + data = request.get_json(silent=True) or {} + torrent_hash = str(data.get('hash') or '').strip() + if not torrent_hash: + return jsonify({'ok': False, 'error': 'Missing torrent hash'}), 400 + smart_queue.set_exclusion(profile['id'], torrent_hash, bool(data.get('excluded', True)), str(data.get('reason') or 'manual')) + return ok({'exclusions': smart_queue.list_exclusions(profile['id'])}) + +@bp.get('/traffic/history') +def traffic_history_get(): + from ..services import traffic_history + profile = preferences.active_profile() + if not profile: + return ok({'history': {'range': request.args.get('range') or '7d', 'bucket': 'day', 'rows': []}}) + range_name = request.args.get('range') or '7d' + if range_name not in {'15m', '1h', '3h', '6h', '24h', '7d', '30d', '90d'}: + range_name = '7d' + try: + try: + from ..services import rtorrent + status = rtorrent.system_status(profile) + traffic_history.record(profile['id'], status.get('down_rate', 0), status.get('up_rate', 0), status.get('total_down', 0), status.get('total_up', 0), force=True) + except Exception: + pass + return ok({'history': traffic_history.history(profile['id'], range_name)}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc), 'history': {'range': range_name, 'rows': []}}) + +@bp.get('/automations') +def automations_get(): + from ..services import automation_rules + profile = preferences.active_profile() + if not profile: + return ok({'rules': [], 'history': [], 'error': 'No profile'}) + try: + return ok({'rules': automation_rules.list_rules(profile['id']), 'history': automation_rules.list_history(profile['id'])}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc), 'rules': [], 'history': []}), 500 + + +@bp.post('/automations') +def automations_save(): + from ..services import automation_rules + profile = 
preferences.active_profile() + if not profile: + return jsonify({'ok': False, 'error': 'No profile'}), 400 + try: + rule = automation_rules.save_rule(profile['id'], request.get_json(silent=True) or {}) + return ok({'rule': rule, 'rules': automation_rules.list_rules(profile['id'])}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc)}), 400 + + +@bp.delete('/automations/') +def automations_delete(rule_id: int): + from ..services import automation_rules + profile = preferences.active_profile() + if not profile: + return jsonify({'ok': False, 'error': 'No profile'}), 400 + try: + automation_rules.delete_rule(rule_id, profile['id']) + return ok({'rules': automation_rules.list_rules(profile['id'])}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc)}), 400 + + +@bp.post('/automations/check') +def automations_check(): + from ..services import automation_rules + profile = preferences.active_profile() + if not profile: + return jsonify({'ok': False, 'error': 'No profile'}), 400 + try: + return ok({'result': automation_rules.check(profile, force=True), 'history': automation_rules.list_history(profile['id'])}) + except Exception as exc: + return jsonify({'ok': False, 'error': str(exc)}), 500 diff --git a/pytorrent/routes/main.py b/pytorrent/routes/main.py new file mode 100644 index 0000000..57c6e95 --- /dev/null +++ b/pytorrent/routes/main.py @@ -0,0 +1,281 @@ +from __future__ import annotations + +from flask import Blueprint, render_template, jsonify, Response +from ..services.preferences import get_preferences, list_profiles, active_profile, BOOTSTRAP_THEMES, FONT_FAMILIES, bootstrap_css_url + +bp = Blueprint("main", __name__) + + +@bp.get("/") +def index(): + prefs = get_preferences() + return render_template( + "index.html", + prefs=prefs, + profiles=list_profiles(), + active_profile=active_profile(), + bootstrap_themes=BOOTSTRAP_THEMES, + font_families=FONT_FAMILIES, + bootstrap_css_url=bootstrap_css_url((prefs or 
{}).get("bootstrap_theme")), + ) + + +@bp.get("/docs") +def docs(): + html = """pyTorrent API Docs
""" + return Response(html, mimetype="text/html") + + +@bp.get("/api/openapi.json") +def openapi(): + paths = { + "/api/profiles": { + "get": {"summary": "List rTorrent profiles", "responses": {"200": {"description": "Profiles"}}}, + "post": {"summary": "Create rTorrent profile", "requestBody": {"required": True, "content": {"application/json": {"schema": {"type": "object", "properties": {"name": {"type": "string"}, "scgi_url": {"type": "string"}, "timeout_seconds": {"type": "integer"}, "max_parallel_jobs": {"type": "integer", "default": 5, "description": "Maximum queued jobs that may run at once for this rTorrent. Move/remove jobs keep request order."}, "is_remote": {"type": "boolean", "description": "When true, CPU/RAM host usage is hidden; public IP checks try remote rTorrent commands when supported."}}}}}}, "responses": {"200": {"description": "Created"}}} + }, + + "/api/profiles/{profile_id}": { + "put": {"summary": "Update rTorrent profile", "parameters": [{"name": "profile_id", "in": "path", "required": True, "schema": {"type": "integer"}}], "requestBody": {"required": True, "content": {"application/json": {"schema": {"type": "object", "properties": {"name": {"type": "string"}, "scgi_url": {"type": "string"}, "timeout_seconds": {"type": "integer"}, "max_parallel_jobs": {"type": "integer", "default": 5, "description": "Maximum queued jobs that may run at once for this rTorrent. 
Move/remove jobs keep request order."}, "is_remote": {"type": "boolean", "description": "When true, CPU/RAM host usage is hidden; public IP checks try remote rTorrent commands when supported."}}}}}}, "responses": {"200": {"description": "Updated"}}}, + "delete": {"summary": "Delete rTorrent profile", "parameters": [{"name": "profile_id", "in": "path", "required": True, "schema": {"type": "integer"}}], "responses": {"200": {"description": "Deleted"}}} + }, + "/api/profiles/{profile_id}/activate": {"post": {"summary": "Activate profile", "parameters": [{"name": "profile_id", "in": "path", "required": True, "schema": {"type": "integer"}}], "responses": {"200": {"description": "Activated"}}}}, + "/api/preferences": { + "get": {"summary": "Get preferences", "responses": {"200": {"description": "Preferences including theme, bootstrap_theme and font_family"}}}, + "post": { + "summary": "Save preferences", + "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": { + "theme": {"type": "string", "enum": ["light", "dark"]}, + "bootstrap_theme": {"type": "string", "enum": list(BOOTSTRAP_THEMES.keys())}, + "font_family": {"type": "string", "enum": list(FONT_FAMILIES.keys())}, + "table_columns_json": {"type": "string"}, + "peers_refresh_seconds": {"type": "integer", "enum": [0, 10, 15, 30, 60]}, + "port_check_enabled": {"type": "boolean"}, + }}}}}, + "responses": {"200": {"description": "Saved"}}, + }, + }, + "/api/torrents": {"get": {"summary": "Get cached torrent snapshot", "responses": {"200": {"description": "Torrent list"}}}}, + "/api/torrents/{action_name}": {"post": {"summary": "Queue torrent action", "description": "For move, path is the target directory; move_data=true physically moves data on the rTorrent host using a detached shell move with status polling, force-overwrites an existing destination, tolerates rTorrent execute timeouts around mkdir/start/polling, handles retries after a partially completed move, avoids SCGI timeout 
on long mv operations, and recheck defaults to move_data. Move and remove jobs are ordered per profile, so a later remove waits for earlier move/remove jobs to finish.", "parameters": [{"name": "action_name", "in": "path", "required": True, "schema": {"type": "string", "enum": ["start", "pause", "stop", "resume", "recheck", "remove", "move", "set_label", "set_ratio_group"]}}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"hashes": {"type": "array", "items": {"type": "string"}}, "path": {"type": "string", "description": "Target directory for move"}, "move_data": {"type": "boolean", "description": "Physically move data before setting torrent directory"}, "recheck": {"type": "boolean", "description": "Run hash check after physical move; defaults to move_data"}, "label": {"type": "string"}, "ratio_group": {"type": "string"}, "remove_data": {"type": "boolean"}}}}}}, "responses": {"200": {"description": "Job queued"}}}}, + "/api/torrents/add": {"post": {"summary": "Add magnet links or torrent files", "requestBody": {"content": {"multipart/form-data": {"schema": {"type": "object", "properties": {"uris": {"type": "string"}, "directory": {"type": "string"}, "label": {"type": "string"}, "start": {"type": "boolean"}, "files": {"type": "array", "items": {"type": "string", "format": "binary"}}}}}, "application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "Jobs queued"}}}}, + "/api/torrents/{torrent_hash}/files": {"get": {"summary": "Torrent files", "parameters": [{"name": "torrent_hash", "in": "path", "required": True, "schema": {"type": "string"}}], "responses": {"200": {"description": "Files"}}}}, + "/api/torrents/{torrent_hash}/peers": {"get": {"summary": "Torrent peers with GeoIP", "parameters": [{"name": "torrent_hash", "in": "path", "required": True, "schema": {"type": "string"}}], "responses": {"200": {"description": "Peers"}}}}, + "/api/torrents/{torrent_hash}/trackers": {"get": {"summary": 
"Torrent trackers", "parameters": [{"name": "torrent_hash", "in": "path", "required": True, "schema": {"type": "string"}}], "responses": {"200": {"description": "Trackers"}}}}, + "/api/speed/limits": {"post": {"summary": "Queue global speed limit change", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"down": {"type": "integer", "description": "Bytes per second, 0 unlimited"}, "up": {"type": "integer", "description": "Bytes per second, 0 unlimited"}}}}}}, "responses": {"200": {"description": "Job queued"}}}}, + "/api/system/status": {"get": {"summary": "rTorrent/system status", "description": "For remote profiles CPU/RAM host usage is not returned and usage_available is false.", "responses": {"200": {"description": "Status"}}}}, + "/api/port-check": {"get": {"summary": "Read cached incoming port check status", "responses": {"200": {"description": "Port check status including status, port, public_ip, source, cached, checked_at and checked_at_epoch"}}}, "post": {"summary": "Run incoming port check immediately, bypassing cache", "responses": {"200": {"description": "Fresh port check status including checked_at and checked_at_epoch"}}}}, + "/api/jobs": {"get": {"summary": "List job queue history", "parameters": [{"name": "limit", "in": "query", "schema": {"type": "integer", "default": 50}}, {"name": "offset", "in": "query", "schema": {"type": "integer", "default": 0}}], "responses": {"200": {"description": "Jobs"}}}}, + "/api/jobs/clear": {"post": {"summary": "Clear finished job history", "description": "Deletes jobs that are not pending or running.", "responses": {"200": {"description": "Deleted count"}}}}, + "/api/jobs/{job_id}/cancel": {"post": {"summary": "Cancel pending or failed job", "parameters": [{"name": "job_id", "in": "path", "required": True, "schema": {"type": "string"}}], "responses": {"200": {"description": "Cancelled"}}}}, + "/api/jobs/{job_id}/retry": {"post": {"summary": "Retry failed or cancelled job", 
"parameters": [{"name": "job_id", "in": "path", "required": True, "schema": {"type": "string"}}], "responses": {"200": {"description": "Retried"}}}}, + "/api/path/browse": {"get": {"summary": "Browse server directories", "parameters": [{"name": "path", "in": "query", "schema": {"type": "string"}}], "responses": {"200": {"description": "Directory listing"}}}}, + "/api/labels": {"get": {"summary": "List labels", "responses": {"200": {"description": "Labels"}}}, "post": {"summary": "Create label", "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "Labels"}}}}, + "/api/ratio-groups": {"get": {"summary": "List ratio groups", "responses": {"200": {"description": "Ratio groups"}}}, "post": {"summary": "Create or update ratio group", "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "Ratio groups"}}}}, + "/api/rss": {"get": {"summary": "List RSS feeds and rules", "responses": {"200": {"description": "RSS config"}}}}, + "/api/rss/feeds": {"post": {"summary": "Add RSS feed", "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "RSS config"}}}}, + "/api/rss/rules": {"post": {"summary": "Add RSS rule", "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "RSS config"}}}}, + "/api/rss/check": {"post": {"summary": "Manually check RSS feeds", "responses": {"200": {"description": "Queued matches"}}}}, + "/api/smart-queue": {"get": {"summary": "Get Smart Queue settings, exceptions and history", "parameters": [{"name": "history_limit", "in": "query", "schema": {"type": "integer", "default": 10, "minimum": 1, "maximum": 100}, "description": "Number of Smart Queue history rows to return"}], "responses": {"200": {"description": "Smart Queue config with history and history_total"}}}, "post": {"summary": "Save Smart Queue 
settings", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"enabled": {"type": "boolean"}, "max_active_downloads": {"type": "integer"}, "stalled_seconds": {"type": "integer"}, "min_speed_bytes": {"type": "integer"}, "min_seeds": {"type": "integer"}}}}}}, "responses": {"200": {"description": "Saved"}}}}, + "/api/smart-queue/check": {"post": {"summary": "Run Smart Queue immediately", "responses": {"200": {"description": "Smart Queue action result"}}}}, + "/api/smart-queue/exclusion": {"post": {"summary": "Add or remove a torrent Smart Queue exception", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"hash": {"type": "string"}, "excluded": {"type": "boolean"}, "reason": {"type": "string"}}}}}}, "responses": {"200": {"description": "Exception list"}}}}, + "/api/traffic/history": {"get": {"summary": "Transfer history for charts", "parameters": [{"name": "range", "in": "query", "schema": {"type": "string", "enum": ["15m", "1h", "3h", "6h", "24h", "7d", "30d", "90d"]}}], "responses": {"200": {"description": "Aggregated traffic history"}}}} + } + paths.update({ + "/api/profiles/{profile_id}": {"delete": {"summary": "Delete rTorrent profile", "parameters": [{"name": "profile_id", "in": "path", "required": True, "schema": {"type": "integer"}}], "responses": {"200": {"description": "Deleted"}}}}, + "/api/path/default": {"get": {"summary": "Read active rTorrent default download path", "responses": {"200": {"description": "Default path"}}}}, + "/api/torrents/{torrent_hash}/files/priority": {"post": {"summary": "Set file priorities", "parameters": [{"name": "torrent_hash", "in": "path", "required": True, "schema": {"type": "string"}}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"files": {"type": "array", "items": {"type": "object", "properties": {"index": {"type": "integer"}, "priority": {"type": "integer", "enum": [0, 1, 2]}}}}}}}}}, 
"responses": {"200": {"description": "Updated priorities"}, "207": {"description": "Partial update"}}}}, + "/api/torrents/{torrent_hash}/peers/action": {"post": {"summary": "Run peer action", "parameters": [{"name": "torrent_hash", "in": "path", "required": True, "schema": {"type": "string"}}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"peer_index": {"type": "integer"}, "action": {"type": "string", "enum": ["disconnect", "kick", "snub", "unsnub", "ban"]}}}}}}, "responses": {"200": {"description": "Peer action result"}}}}, + "/api/labels/{label_id}": {"delete": {"summary": "Delete saved label", "parameters": [{"name": "label_id", "in": "path", "required": True, "schema": {"type": "integer"}}], "responses": {"200": {"description": "Labels"}}}}, + "/api/rtorrent-config": {"get": {"summary": "Read supported rTorrent config fields", "responses": {"200": {"description": "Config fields"}}}, "post": {"summary": "Save supported rTorrent config fields", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"values": {"type": "object"}}}}}}, "responses": {"200": {"description": "Save result"}}}}, + "/api/rtorrent-config/generate": {"post": {"summary": "Generate rTorrent config text from provided values", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"values": {"type": "object"}}}}}}, "responses": {"200": {"description": "Generated config text"}}}}, + "/api/automations": {"get": {"summary": "List automation rules and history", "responses": {"200": {"description": "Rules and history"}}}, "post": {"summary": "Create or update automation rule", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"name": {"type": "string"}, "enabled": {"type": "boolean"}, "cooldown_minutes": {"type": "integer"}, "conditions": {"type": "array"}, "effects": {"type": "array"}}}}}}, "responses": {"200": {"description": "Rule 
saved"}}}}, + "/api/automations/{rule_id}": {"delete": {"summary": "Delete automation rule", "parameters": [{"name": "rule_id", "in": "path", "required": True, "schema": {"type": "integer"}}], "responses": {"200": {"description": "Deleted"}}}}, + "/api/automations/check": {"post": {"summary": "Run automation rules immediately", "responses": {"200": {"description": "Automation result"}}}} + }) + components = { + "schemas": { + "ApiOk": { + "type": "object", + "properties": {"ok": {"type": "boolean"}}, + "required": ["ok"], + }, + "Profile": { + "type": "object", + "additionalProperties": True, + "properties": { + "id": {"type": "integer"}, + "name": {"type": "string"}, + "scgi_url": {"type": "string"}, + "timeout_seconds": {"type": "integer"}, + "max_parallel_jobs": {"type": "integer"}, + }, + }, + "Torrent": { + "type": "object", + "additionalProperties": True, + "properties": { + "hash": {"type": "string"}, + "name": {"type": "string"}, + "path": {"type": "string"}, + "status": {"type": "string"}, + "size": {"type": "integer", "format": "int64"}, + "completed_bytes": {"type": "integer", "format": "int64"}, + "down_total": {"type": "integer", "format": "int64"}, + "up_total": {"type": "integer", "format": "int64"}, + "complete": {"type": "boolean"}, + "state": {"type": "boolean"}, + "paused": {"type": "boolean"}, + "hashing": {"type": "integer"}, + "message": {"type": "string"}, + }, + }, + "TorrentFilterSummary": { + "type": "object", + "properties": { + "count": {"type": "integer", "description": "Number of torrents in this filter."}, + "size": {"type": "integer", "format": "int64", "description": "Total torrent payload size in bytes."}, + "disk_bytes": {"type": "integer", "format": "int64", "description": "Completed bytes reported by rTorrent; used as the displayed Data value."}, + "completed_bytes": {"type": "integer", "format": "int64", "description": "Completed bytes reported by rTorrent."}, + "remaining_bytes": {"type": "integer", "format": "int64", 
"description": "size - completed_bytes, never below zero."}, + "progress_percent": {"type": "number", "format": "float", "description": "Completed percentage for this filter."}, + "remaining_percent": {"type": "number", "format": "float", "description": "Remaining percentage for this filter."}, + "down_total": {"type": "integer", "format": "int64", "deprecated": True, "description": "Backward compatibility field; not used by the filters UI."}, + "up_total": {"type": "integer", "format": "int64", "deprecated": True, "description": "Backward compatibility field; not used by the filters UI."}, + }, + "required": ["count", "size", "disk_bytes", "completed_bytes", "remaining_bytes", "progress_percent", "remaining_percent"], + }, + "TorrentSummaryFilters": { + "type": "object", + "properties": { + "all": {"$ref": "#/components/schemas/TorrentFilterSummary"}, + "downloading": {"$ref": "#/components/schemas/TorrentFilterSummary"}, + "seeding": {"$ref": "#/components/schemas/TorrentFilterSummary"}, + "paused": {"$ref": "#/components/schemas/TorrentFilterSummary"}, + "checking": {"$ref": "#/components/schemas/TorrentFilterSummary"}, + "error": {"$ref": "#/components/schemas/TorrentFilterSummary"}, + "stopped": {"$ref": "#/components/schemas/TorrentFilterSummary"}, + }, + "required": ["all", "downloading", "seeding", "paused", "checking", "error", "stopped"], + }, + "TorrentSummary": { + "type": "object", + "properties": { + "filters": {"$ref": "#/components/schemas/TorrentSummaryFilters"}, + "cache_ttl_seconds": {"type": "integer", "description": "Summary cache TTL in seconds."}, + "generated_at_epoch": {"type": "number", "format": "double", "description": "Unix timestamp when summary was generated."}, + "cached": {"type": "boolean", "description": "True when returned from cache."}, + }, + "required": ["filters", "cache_ttl_seconds", "generated_at_epoch", "cached"], + }, + "TorrentListResponse": { + "allOf": [ + {"$ref": "#/components/schemas/ApiOk"}, + {"type": "object", 
"properties": { + "profile_id": {"type": "integer"}, + "torrents": {"type": "array", "items": {"$ref": "#/components/schemas/Torrent"}}, + "summary": {"$ref": "#/components/schemas/TorrentSummary"}, + "error": {"type": "string", "nullable": True}, + }, "required": ["torrents", "summary"]}, + ], + }, + "CleanupSummary": { + "type": "object", + "properties": { + "jobs_total": {"type": "integer"}, + "jobs_clearable": {"type": "integer"}, + "smart_queue_history_total": {"type": "integer"}, + "retention_days": {"type": "object", "properties": {"jobs": {"type": "integer"}, "smart_queue_history": {"type": "integer"}}}, + "database": {"type": "object", "properties": {"path": {"type": "string"}, "size": {"type": "integer", "format": "int64"}, "size_h": {"type": "string"}, "error": {"type": "string"}}}, + }, + "required": ["jobs_total", "jobs_clearable", "smart_queue_history_total", "retention_days", "database"], + }, + "CleanupResponse": { + "allOf": [ + {"$ref": "#/components/schemas/ApiOk"}, + {"type": "object", "properties": {"cleanup": {"$ref": "#/components/schemas/CleanupSummary"}, "deleted": {"oneOf": [{"type": "integer"}, {"type": "object"}]}}}, + ], + }, + "PortCheckStatus": { + "type": "object", + "additionalProperties": True, + "properties": { + "status": {"type": "string", "enum": ["open", "closed", "unknown", "disabled", "error"]}, + "enabled": {"type": "boolean"}, + "port": {"type": "integer"}, + "public_ip": {"type": "string"}, + "source": {"type": "string"}, + "cached": {"type": "boolean"}, + "checked_at": {"type": "string", "format": "date-time"}, + "checked_at_epoch": {"type": "number", "format": "double"}, + "error": {"type": "string"}, + }, + }, + "AppStatus": { + "type": "object", + "properties": { + "pytorrent": {"type": "object", "additionalProperties": True}, + "cleanup": {"$ref": "#/components/schemas/CleanupSummary"}, + "profile": {"$ref": "#/components/schemas/Profile"}, + "scgi": {"type": "object", "nullable": True, "additionalProperties": True}, 
+ "port_check": {"$ref": "#/components/schemas/PortCheckStatus"}, + "api_ms": {"type": "number", "format": "float"}, + }, + "required": ["pytorrent", "cleanup", "scgi", "port_check", "api_ms"], + }, + "AppStatusResponse": { + "allOf": [ + {"$ref": "#/components/schemas/ApiOk"}, + {"type": "object", "properties": {"status": {"$ref": "#/components/schemas/AppStatus"}}, "required": ["status"]}, + ], + }, + "JobQueuedResponse": { + "allOf": [ + {"$ref": "#/components/schemas/ApiOk"}, + {"type": "object", "properties": {"job_id": {"type": "string"}, "job_ids": {"type": "array", "items": {"type": "string"}}, "hash_count": {"type": "integer"}, "bulk": {"type": "boolean"}}}, + ], + }, + "TrackerActionResponse": { + "allOf": [ + {"$ref": "#/components/schemas/ApiOk"}, + {"type": "object", "properties": {"result": {"type": "object", "additionalProperties": True}, "message": {"type": "string"}}}, + ], + }, + } + } + + def response_ref(schema_name: str, description: str = "OK") -> dict: + return {"description": description, "content": {"application/json": {"schema": {"$ref": f"#/components/schemas/{schema_name}"}}}} + + paths["/api/torrents"]["get"]["responses"]["200"] = response_ref("TorrentListResponse", "Torrent list with cached filter summary") + paths["/api/torrents/{action_name}"]["post"]["responses"]["200"] = response_ref("JobQueuedResponse", "Job queued") + paths["/api/torrents/add"]["post"]["responses"]["200"] = response_ref("JobQueuedResponse", "Jobs queued") + + paths.update({ + "/api/torrents/{torrent_hash}/trackers/{action_name}": { + "post": { + "summary": "Run tracker action", + "parameters": [ + {"name": "torrent_hash", "in": "path", "required": True, "schema": {"type": "string"}}, + {"name": "action_name", "in": "path", "required": True, "schema": {"type": "string"}}, + ], + "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, + "responses": {"200": response_ref("TrackerActionResponse", "Tracker action result")}, + } + }, + 
"/api/app/status": { + "get": {"summary": "pyTorrent application status", "responses": {"200": response_ref("AppStatusResponse", "Application status")}} + }, + "/api/cleanup/summary": { + "get": {"summary": "Cleanup summary", "responses": {"200": response_ref("CleanupResponse", "Cleanup summary")}} + }, + "/api/cleanup/jobs": { + "post": {"summary": "Clear finished job history", "responses": {"200": response_ref("CleanupResponse", "Cleanup result")}} + }, + "/api/cleanup/smart-queue": { + "post": {"summary": "Clear Smart Queue history", "responses": {"200": response_ref("CleanupResponse", "Cleanup result")}} + }, + "/api/cleanup/all": { + "post": {"summary": "Clear all cleanup-supported history", "responses": {"200": response_ref("CleanupResponse", "Cleanup result")}} + }, + }) + + return jsonify({"openapi": "3.0.3", "info": {"title": "pyTorrent API", "version": "0.2.0"}, "paths": paths, "components": components}) diff --git a/pytorrent/services/automation_rules.py b/pytorrent/services/automation_rules.py new file mode 100644 index 0000000..0e92382 --- /dev/null +++ b/pytorrent/services/automation_rules.py @@ -0,0 +1,173 @@ +from __future__ import annotations +from datetime import datetime, timezone +from typing import Any +import json +from ..db import connect, default_user_id, utcnow +from . 
import json
from datetime import datetime, timezone
from typing import Any

from ..db import connect, default_user_id, utcnow
from . import rtorrent
from .preferences import active_profile


def _loads(value: str | None, default: Any) -> Any:
    """Best-effort ``json.loads``: empty or invalid text yields *default*."""
    try:
        return json.loads(value or '')
    except Exception:
        return default


def _ts(value: str | None) -> float:
    """Convert ISO-8601 text (optionally 'Z'-suffixed) to a unix timestamp; 0.0 on failure."""
    if not value:
        return 0.0
    try:
        normalized = str(value).replace('Z', '+00:00')
        return datetime.fromisoformat(normalized).timestamp()
    except Exception:
        return 0.0


def _now_ts() -> float:
    """Current UTC time as a unix timestamp."""
    return datetime.now(timezone.utc).timestamp()


def _label_names(value: str | None) -> list[str]:
    """Split a stored label string on ',', ';' or '|' into unique names, order preserved."""
    names: list[str] = []
    normalized = str(value or '').replace(';', ',').replace('|', ',')
    for chunk in normalized.split(','):
        name = chunk.strip()
        if name and name not in names:
            names.append(name)
    return names


def _label_value(labels: list[str]) -> str:
    """Join label names back into the canonical ', '-separated storage form, de-duplicated."""
    unique: list[str] = []
    for raw in labels:
        name = str(raw or '').strip()
        if name and name not in unique:
            unique.append(name)
    return ', '.join(unique)


def _rule_row(row: dict[str, Any]) -> dict[str, Any]:
    """Turn a DB row into an API dict with decoded conditions/effects lists."""
    item = dict(row)
    item['conditions'] = _loads(item.pop('conditions_json', '[]'), [])
    item['effects'] = _loads(item.pop('effects_json', '[]'), [])
    return item


def list_rules(profile_id: int | None = None, user_id: int | None = None) -> list[dict[str, Any]]:
    """List automation rules for *profile_id* (defaults to the active profile) plus global rules."""
    user_id = user_id or default_user_id()
    if profile_id is None:
        profile = active_profile()
        profile_id = int(profile['id']) if profile else None
    with connect() as conn:
        rows = conn.execute(
            'SELECT * FROM automation_rules WHERE user_id=? AND (profile_id=? OR profile_id IS NULL) ORDER BY enabled DESC, name COLLATE NOCASE',
            (user_id, profile_id),
        ).fetchall()
        return [_rule_row(row) for row in rows]


def get_rule(rule_id: int, profile_id: int, user_id: int | None = None) -> dict[str, Any]:
    """Fetch one rule; raises ValueError when it does not exist for this user/profile."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        row = conn.execute(
            'SELECT * FROM automation_rules WHERE id=? AND user_id=? AND profile_id=?',
            (rule_id, user_id, profile_id),
        ).fetchone()
    if not row:
        raise ValueError('Rule not found')
    return _rule_row(row)


def save_rule(profile_id: int, data: dict[str, Any], user_id: int | None = None) -> dict[str, Any]:
    """Create or update a rule from request *data* and return the stored form.

    Raises ValueError when conditions or effects are missing.
    """
    user_id = user_id or default_user_id()
    name = str(data.get('name') or 'Automation rule').strip() or 'Automation rule'
    conditions = data.get('conditions') or []
    effects = data.get('effects') or []
    if not isinstance(conditions, list) or not conditions:
        raise ValueError('Rule needs at least one condition')
    if not isinstance(effects, list) or not effects:
        raise ValueError('Rule needs at least one effect')
    cooldown = max(0, int(data.get('cooldown_minutes') or 0))
    enabled = 1 if data.get('enabled', True) else 0
    now = utcnow()
    rule_id = int(data.get('id') or 0)
    with connect() as conn:
        if rule_id:
            conn.execute(
                'UPDATE automation_rules SET name=?, enabled=?, conditions_json=?, effects_json=?, cooldown_minutes=?, updated_at=? WHERE id=? AND user_id=? AND profile_id=?',
                (name, enabled, json.dumps(conditions), json.dumps(effects), cooldown, now, rule_id, user_id, profile_id),
            )
        else:
            cur = conn.execute(
                'INSERT INTO automation_rules(user_id,profile_id,name,enabled,conditions_json,effects_json,cooldown_minutes,created_at,updated_at) VALUES(?,?,?,?,?,?,?,?,?)',
                (user_id, profile_id, name, enabled, json.dumps(conditions), json.dumps(effects), cooldown, now, now),
            )
            rule_id = int(cur.lastrowid)
    # Re-read so the caller gets the canonical row (also validates the UPDATE hit a row).
    return get_rule(rule_id, profile_id, user_id)


def delete_rule(rule_id: int, profile_id: int, user_id: int | None = None) -> None:
    """Delete a rule and its per-torrent state; silently a no-op when absent."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        conn.execute('DELETE FROM automation_rules WHERE id=? AND user_id=? AND profile_id=?', (rule_id, user_id, profile_id))
        conn.execute('DELETE FROM automation_rule_state WHERE rule_id=? AND profile_id=?', (rule_id, profile_id))


def list_history(profile_id: int, user_id: int | None = None, limit: int = 30) -> list[dict[str, Any]]:
    """Return up to *limit* (clamped to 1..100) recent automation history rows."""
    user_id = user_id or default_user_id()
    clamped = max(1, min(int(limit or 30), 100))
    with connect() as conn:
        return conn.execute(
            'SELECT * FROM automation_history WHERE user_id=? AND profile_id=? ORDER BY created_at DESC LIMIT ?',
            (user_id, profile_id, clamped),
        ).fetchall()


def _condition_true(t: dict[str, Any], cond: dict[str, Any]) -> bool:
    """Evaluate one condition against a torrent snapshot; unknown types are False."""
    kind = str(cond.get('type') or '')
    if kind == 'completed':
        return bool(int(t.get('complete') or 0))
    if kind == 'no_seeds':
        return int(t.get('seeds') or 0) <= int(cond.get('seeds') or 0)
    if kind == 'ratio_gte':
        return float(t.get('ratio') or 0) >= float(cond.get('ratio') or 0)
    if kind == 'label_missing':
        return str(cond.get('label') or '').strip() not in _label_names(t.get('label'))
    if kind == 'label_has':
        return str(cond.get('label') or '').strip() in _label_names(t.get('label'))
    if kind == 'status':
        return str(t.get('status') or '').lower() == str(cond.get('status') or '').lower()
    if kind == 'path_contains':
        return str(cond.get('text') or '').lower() in str(t.get('path') or '').lower()
    return False


def _conditions_match(conn, rule: dict[str, Any], profile_id: int, t: dict[str, Any]) -> bool:
    """True when every rule condition holds for torrent *t*.

    'no_seeds' with minutes > 0 is time-delayed: the first matching check records
    condition_since_at in automation_rule_state and the condition only passes once
    the configured number of minutes has elapsed since then. A non-matching check
    resets the recorded start. NOTE(review): rows are accessed via .get(), which
    assumes the project's connect() row factory yields dict-like rows.
    """
    h = str(t.get('hash') or '')
    if not h:
        return False
    immediate_ok = True
    delayed_ok = True
    now = utcnow()
    now_ts = _now_ts()
    for cond in rule.get('conditions') or []:
        ok = _condition_true(t, cond)
        if cond.get('type') == 'no_seeds' and int(cond.get('minutes') or 0) > 0:
            row = conn.execute(
                'SELECT condition_since_at FROM automation_rule_state WHERE rule_id=? AND profile_id=? AND torrent_hash=?',
                (rule['id'], profile_id, h),
            ).fetchone()
            if ok:
                # Keep the earliest observed start; COALESCE in the upsert preserves it.
                since = row['condition_since_at'] if row and row.get('condition_since_at') else now
                conn.execute(
                    'INSERT INTO automation_rule_state(rule_id,profile_id,torrent_hash,condition_since_at,last_matched_at,updated_at) VALUES(?,?,?,?,?,?) ON CONFLICT(rule_id,profile_id,torrent_hash) DO UPDATE SET condition_since_at=COALESCE(automation_rule_state.condition_since_at, excluded.condition_since_at), last_matched_at=excluded.last_matched_at, updated_at=excluded.updated_at',
                    (rule['id'], profile_id, h, since, now, now),
                )
                delayed_ok = delayed_ok and (now_ts - _ts(since) >= int(cond.get('minutes') or 0) * 60)
            else:
                conn.execute(
                    'UPDATE automation_rule_state SET condition_since_at=NULL, updated_at=? WHERE rule_id=? AND profile_id=? AND torrent_hash=?',
                    (now, rule['id'], profile_id, h),
                )
                delayed_ok = False
        else:
            immediate_ok = immediate_ok and ok
    return immediate_ok and delayed_ok


def _cooldown_ok(conn, rule: dict[str, Any], profile_id: int, torrent_hash: str) -> bool:
    """True unless the rule fired on this torrent within its cooldown window."""
    cooldown = int(rule.get('cooldown_minutes') or 0)
    row = conn.execute(
        'SELECT last_applied_at FROM automation_rule_state WHERE rule_id=? AND profile_id=? AND torrent_hash=?',
        (rule['id'], profile_id, torrent_hash),
    ).fetchone()
    if not row or not row.get('last_applied_at'):
        return True
    return _now_ts() - _ts(row['last_applied_at']) >= cooldown * 60


def _apply_effects(c: Any, profile: dict[str, Any], torrent: dict[str, Any], effects: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Run each effect against rTorrent via client *c*; returns the applied actions."""
    h = str(torrent.get('hash') or '')
    labels = _label_names(torrent.get('label'))
    applied: list[dict[str, Any]] = []
    for eff in effects:
        kind = str(eff.get('type') or '')
        if kind == 'move':
            path = str(eff.get('path') or '').strip() or rtorrent.default_download_path(profile)
            if path:
                c.call('d.directory.set', h, path)
                applied.append({'type': 'move', 'path': path})
        elif kind == 'add_label':
            label = str(eff.get('label') or '').strip()
            if label and label not in labels:
                labels.append(label)
                c.call('d.custom1.set', h, _label_value(labels))
            if label:
                # Recorded even when the label was already present (original behavior).
                applied.append({'type': 'add_label', 'label': label})
        elif kind == 'remove_label':
            label = str(eff.get('label') or '').strip()
            labels = [x for x in labels if x != label]
            c.call('d.custom1.set', h, _label_value(labels))
            applied.append({'type': 'remove_label', 'label': label})
        elif kind == 'set_labels':
            value = _label_value(_label_names(eff.get('labels')))
            c.call('d.custom1.set', h, value)
            labels = _label_names(value)
            applied.append({'type': 'set_labels', 'labels': value})
        elif kind in {'pause', 'stop', 'start', 'resume', 'recheck'}:
            method = {'pause': 'd.pause', 'stop': 'd.stop', 'start': 'd.start', 'resume': 'd.resume', 'recheck': 'd.check_hash'}[kind]
            c.call(method, h)
            applied.append({'type': kind})
    return applied


def check(profile: dict | None = None, user_id: int | None = None, force: bool = False) -> dict[str, Any]:
    """Evaluate all (enabled, unless *force*) rules against the current torrent list.

    Records matches in automation_rule_state / automation_history and returns a
    summary of what was applied. Effect errors are captured per torrent rather
    than aborting the sweep.
    """
    profile = profile or active_profile()
    if not profile:
        return {'ok': False, 'error': 'No active rTorrent profile'}
    user_id = user_id or default_user_id()
    profile_id = int(profile['id'])
    rules = [r for r in list_rules(profile_id, user_id) if force or int(r.get('enabled') or 0)]
    if not rules:
        return {'ok': True, 'checked': 0, 'applied': [], 'rules': 0}
    torrents = rtorrent.list_torrents(profile)
    c = rtorrent.client_for(profile)
    applied: list[dict[str, Any]] = []
    now = utcnow()
    with connect() as conn:
        for rule in rules:
            for t in torrents:
                h = str(t.get('hash') or '')
                if not _conditions_match(conn, rule, profile_id, t):
                    continue
                if not force and not _cooldown_ok(conn, rule, profile_id, h):
                    continue
                try:
                    actions = _apply_effects(c, profile, t, rule.get('effects') or [])
                except Exception as exc:
                    actions = [{'error': str(exc)}]
                conn.execute(
                    'INSERT INTO automation_rule_state(rule_id,profile_id,torrent_hash,last_matched_at,last_applied_at,updated_at) VALUES(?,?,?,?,?,?) ON CONFLICT(rule_id,profile_id,torrent_hash) DO UPDATE SET last_matched_at=excluded.last_matched_at, last_applied_at=excluded.last_applied_at, updated_at=excluded.updated_at',
                    (rule['id'], profile_id, h, now, now, now),
                )
                conn.execute(
                    'INSERT INTO automation_history(user_id,profile_id,rule_id,torrent_hash,torrent_name,rule_name,actions_json,created_at) VALUES(?,?,?,?,?,?,?,?)',
                    (user_id, profile_id, rule['id'], h, str(t.get('name') or ''), str(rule.get('name') or ''), json.dumps(actions), now),
                )
                applied.append({'rule_id': rule['id'], 'rule_name': rule.get('name'), 'hash': h, 'name': t.get('name'), 'actions': actions})
    return {'ok': True, 'checked': len(torrents), 'rules': len(rules), 'applied': applied}
None + _reader = geoip2.database.Reader(str(GEOIP_DB)) + return _reader + + +@lru_cache(maxsize=50000) +def lookup_ip(ip: str) -> dict: + reader = _get_reader() + if not reader: + return {"country_iso": "", "country": "", "city": ""} + try: + hit = reader.city(ip) + return { + "country_iso": (hit.country.iso_code or "").lower(), + "country": hit.country.name or "", + "city": hit.city.name or "", + } + except Exception: + return {"country_iso": "", "country": "", "city": ""} diff --git a/pytorrent/services/preferences.py b/pytorrent/services/preferences.py new file mode 100644 index 0000000..00c1125 --- /dev/null +++ b/pytorrent/services/preferences.py @@ -0,0 +1,176 @@ +from __future__ import annotations + +from ..db import connect, utcnow, default_user_id + +BOOTSTRAP_THEMES = { + "default": "Default Bootstrap", + "flatly": "Flatly", + "litera": "Litera", + "lumen": "Lumen", + "minty": "Minty", + "sketchy": "Sketchy", + "solar": "Solar", + "spacelab": "Spacelab", + "united": "United", + "zephyr": "Zephyr", +} + +FONT_FAMILIES = { + "default": "Theme default", + "adwaita-mono": "Adwaita Mono", + "inter": "Inter", + "system-ui": "System UI", + "source-sans-3": "Source Sans 3", + "jetbrains-mono": "JetBrains Mono", +} + +def bootstrap_css_url(theme: str | None) -> str: + theme = theme if theme in BOOTSTRAP_THEMES else "default" + if theme == "default": + return "https://cdn.jsdelivr.net/npm/bootstrap@5.3.3/dist/css/bootstrap.min.css" + return f"https://cdn.jsdelivr.net/npm/bootswatch@5.3.3/dist/{theme}/bootstrap.min.css" + + +def list_profiles(user_id: int | None = None): + user_id = user_id or default_user_id() + with connect() as conn: + return conn.execute( + "SELECT * FROM rtorrent_profiles WHERE user_id=? 
from ..db import connect, utcnow, default_user_id


def list_profiles(user_id: int | None = None):
    """Return all rTorrent profiles for the user, default profile first."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        return conn.execute(
            "SELECT * FROM rtorrent_profiles WHERE user_id=? ORDER BY is_default DESC, name COLLATE NOCASE",
            (user_id,),
        ).fetchall()


def get_profile(profile_id: int, user_id: int | None = None):
    """Return one profile row, or None when it does not belong to the user."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        return conn.execute(
            "SELECT * FROM rtorrent_profiles WHERE id=? AND user_id=?",
            (profile_id, user_id),
        ).fetchone()


def active_profile(user_id: int | None = None):
    """Return the user's active profile, falling back to the default/first one."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        pref = conn.execute("SELECT active_rtorrent_id FROM user_preferences WHERE user_id=?", (user_id,)).fetchone()
        if pref and pref.get("active_rtorrent_id"):
            row = conn.execute(
                "SELECT * FROM rtorrent_profiles WHERE id=? AND user_id=?",
                (pref["active_rtorrent_id"], user_id),
            ).fetchone()
            if row:
                return row
        # Preference missing or stale: pick the default profile, then the oldest.
        return conn.execute(
            "SELECT * FROM rtorrent_profiles WHERE user_id=? ORDER BY is_default DESC, id ASC LIMIT 1",
            (user_id,),
        ).fetchone()


def save_profile(data: dict, user_id: int | None = None):
    """Create a new profile from *data* and return the stored row.

    Raises ValueError when the SCGI URL does not use the scgi:// scheme.
    The new profile becomes active when it is marked default or no profile
    was active before.
    """
    user_id = user_id or default_user_id()
    now = utcnow()
    name = str(data.get("name") or "rTorrent").strip()
    scgi_url = str(data.get("scgi_url") or "").strip()
    timeout = int(data.get("timeout_seconds") or 5)
    max_parallel = int(data.get("max_parallel_jobs") or 5)
    is_remote = 1 if data.get("is_remote") else 0
    is_default = 1 if data.get("is_default") else 0
    if not scgi_url.startswith("scgi://"):
        raise ValueError("SCGI URL musi zaczynać się od scgi://")
    with connect() as conn:
        if is_default:
            # Only one default per user.
            conn.execute("UPDATE rtorrent_profiles SET is_default=0 WHERE user_id=?", (user_id,))
        cur = conn.execute(
            "INSERT INTO rtorrent_profiles(user_id,name,scgi_url,is_default,timeout_seconds,max_parallel_jobs,is_remote,created_at,updated_at) VALUES(?,?,?,?,?,?,?,?,?)",
            (user_id, name, scgi_url, is_default, timeout, max_parallel, is_remote, now, now),
        )
        profile_id = cur.lastrowid
        pref = conn.execute("SELECT active_rtorrent_id FROM user_preferences WHERE user_id=?", (user_id,)).fetchone()
        if not pref or not pref.get("active_rtorrent_id") or is_default:
            conn.execute(
                "UPDATE user_preferences SET active_rtorrent_id=?, updated_at=? WHERE user_id=?",
                (profile_id, now, user_id),
            )
        return conn.execute("SELECT * FROM rtorrent_profiles WHERE id=? AND user_id=?", (profile_id, user_id)).fetchone()


def update_profile(profile_id: int, data: dict, user_id: int | None = None):
    """Update an existing profile in place and return the refreshed row.

    Raises ValueError for an invalid SCGI URL or an unknown profile id.
    """
    user_id = user_id or default_user_id()
    now = utcnow()
    name = str(data.get("name") or "rTorrent").strip()
    scgi_url = str(data.get("scgi_url") or "").strip()
    timeout = int(data.get("timeout_seconds") or 5)
    max_parallel = int(data.get("max_parallel_jobs") or 5)
    is_remote = 1 if data.get("is_remote") else 0
    is_default = 1 if data.get("is_default") else 0
    if not scgi_url.startswith("scgi://"):
        raise ValueError("SCGI URL musi zaczynać się od scgi://")
    with connect() as conn:
        row = conn.execute("SELECT id FROM rtorrent_profiles WHERE id=? AND user_id=?", (profile_id, user_id)).fetchone()
        if not row:
            raise ValueError("Profil nie istnieje")
        if is_default:
            conn.execute("UPDATE rtorrent_profiles SET is_default=0 WHERE user_id=?", (user_id,))
        conn.execute(
            "UPDATE rtorrent_profiles SET name=?, scgi_url=?, is_default=?, timeout_seconds=?, max_parallel_jobs=?, is_remote=?, updated_at=? WHERE id=? AND user_id=?",
            (name, scgi_url, is_default, timeout, max_parallel, is_remote, now, profile_id, user_id),
        )
        return conn.execute("SELECT * FROM rtorrent_profiles WHERE id=? AND user_id=?", (profile_id, user_id)).fetchone()


def delete_profile(profile_id: int, user_id: int | None = None):
    """Delete a profile and repoint the active-profile preference at a survivor (or NULL)."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        conn.execute("DELETE FROM rtorrent_profiles WHERE id=? AND user_id=?", (profile_id, user_id))
        active = active_profile(user_id)
        conn.execute(
            "UPDATE user_preferences SET active_rtorrent_id=?, updated_at=? WHERE user_id=?",
            (active["id"] if active else None, utcnow(), user_id),
        )


def activate_profile(profile_id: int, user_id: int | None = None):
    """Make *profile_id* the active profile; raises ValueError when it does not exist."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        row = conn.execute("SELECT id FROM rtorrent_profiles WHERE id=? AND user_id=?", (profile_id, user_id)).fetchone()
        if not row:
            raise ValueError("Profil nie istnieje")
        conn.execute(
            "UPDATE user_preferences SET active_rtorrent_id=?, updated_at=? WHERE user_id=?",
            (profile_id, utcnow(), user_id),
        )
    return get_profile(profile_id, user_id)


def get_preferences(user_id: int | None = None):
    """Return the user's preference row."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        return conn.execute("SELECT * FROM user_preferences WHERE user_id=?", (user_id,)).fetchone()


def save_preferences(data: dict, user_id: int | None = None):
    """Apply any recognised preference fields from *data*; unknown values are ignored.

    Each field is validated against its allow-list before writing; the updated
    preference row is returned.
    """
    user_id = user_id or default_user_id()
    allowed_theme = data.get("theme") if data.get("theme") in {"light", "dark"} else None
    bootstrap_theme = data.get("bootstrap_theme") if data.get("bootstrap_theme") in BOOTSTRAP_THEMES else None
    font_family = data.get("font_family") if data.get("font_family") in FONT_FAMILIES else None
    table_columns_json = data.get("table_columns_json")
    peers_refresh_seconds = data.get("peers_refresh_seconds")
    port_check_enabled = data.get("port_check_enabled")
    with connect() as conn:
        now = utcnow()
        if allowed_theme:
            conn.execute("UPDATE user_preferences SET theme=?, updated_at=? WHERE user_id=?", (allowed_theme, now, user_id))
        if bootstrap_theme:
            conn.execute("UPDATE user_preferences SET bootstrap_theme=?, updated_at=? WHERE user_id=?", (bootstrap_theme, now, user_id))
        if font_family:
            conn.execute("UPDATE user_preferences SET font_family=?, updated_at=? WHERE user_id=?", (font_family, now, user_id))
        if table_columns_json is not None:
            conn.execute("UPDATE user_preferences SET table_columns_json=?, updated_at=? WHERE user_id=?", (str(table_columns_json), now, user_id))
        if peers_refresh_seconds is not None:
            sec = int(peers_refresh_seconds or 0)
            if sec not in {0, 10, 15, 30, 60}:
                sec = 0  # unsupported interval -> disabled
            conn.execute("UPDATE user_preferences SET peers_refresh_seconds=?, updated_at=? WHERE user_id=?", (sec, now, user_id))
        if port_check_enabled is not None:
            conn.execute("UPDATE user_preferences SET port_check_enabled=?, updated_at=? WHERE user_id=?", (1 if port_check_enabled else 0, now, user_id))
    return get_preferences(user_id)
from datetime import datetime, timedelta, timezone

# Module-level throttle state: last run timestamp and minimum spacing between runs.
_LAST_CLEANUP = 0.0
CLEANUP_EVERY_SECONDS = 3600


def _cutoff(days: int) -> str:
    """ISO cutoff timestamp *days* (minimum 1) before now, UTC."""
    horizon = timedelta(days=max(1, int(days or 1)))
    return (datetime.now(timezone.utc) - horizon).isoformat(timespec="seconds")


def _table_exists(conn, table: str) -> bool:
    """True when *table* exists in the SQLite schema."""
    row = conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table,)).fetchone()
    return bool(row)


def cleanup(force: bool = False) -> dict[str, int]:
    """Prune aged rows from history tables per the configured retention windows.

    Runs at most once per CLEANUP_EVERY_SECONDS unless *force* is set; a skipped
    run returns an empty dict. Otherwise returns per-table deletion counts.
    Finished job rows ('done'/'failed'/'cancelled') are the only jobs removed.
    """
    global _LAST_CLEANUP
    now_ts = datetime.now(timezone.utc).timestamp()
    if not force and now_ts - _LAST_CLEANUP < CLEANUP_EVERY_SECONDS:
        return {}
    _LAST_CLEANUP = now_ts

    deleted: dict[str, int] = {}
    with connect() as conn:
        targets = {
            "traffic_history": ("created_at", TRAFFIC_HISTORY_RETENTION_DAYS),
            "smart_queue_history": ("created_at", SMART_QUEUE_HISTORY_RETENTION_DAYS),
            "jobs": ("updated_at", JOBS_RETENTION_DAYS),
            "logs": ("created_at", LOG_RETENTION_DAYS),
        }
        for table, (column, days) in targets.items():
            if not _table_exists(conn, table):
                continue
            if table == "jobs":
                cur = conn.execute(
                    f"DELETE FROM {table} WHERE {column} < ? AND status IN ('done','failed','cancelled')",
                    (_cutoff(days),),
                )
            else:
                cur = conn.execute(f"DELETE FROM {table} WHERE {column} < ?", (_cutoff(days),))
            deleted[table] = int(cur.rowcount or 0)
    return deleted
"jobs": + cur = conn.execute( + f"DELETE FROM {table} WHERE {column} < ? AND status IN ('done','failed','cancelled')", + (_cutoff(days),), + ) + else: + cur = conn.execute(f"DELETE FROM {table} WHERE {column} < ?", (_cutoff(days),)) + deleted[table] = int(cur.rowcount or 0) + return deleted diff --git a/pytorrent/services/rtorrent.py b/pytorrent/services/rtorrent.py new file mode 100644 index 0000000..3432b9e --- /dev/null +++ b/pytorrent/services/rtorrent.py @@ -0,0 +1,1079 @@ +from __future__ import annotations + +import os +import posixpath +import socket +import time +import uuid +from urllib.parse import urlparse +from xmlrpc.client import Binary, dumps, loads +from ..utils import human_rate, human_size +from ..db import connect, default_user_id, utcnow + + +class ScgiMethod: + def __init__(self, client: "ScgiRtorrentClient", name: str): + self.client = client + self.name = name + + def __getattr__(self, name: str): + return ScgiMethod(self.client, f"{self.name}.{name}") + + def __call__(self, *args): + return self.client.call(self.name, *args) + + +class ScgiRtorrentClient: + """XML-RPC over SCGI client for rTorrent network.scgi.open_port.""" + + def __init__(self, url: str, timeout: int = 5): + parsed = urlparse(url) + if parsed.scheme != "scgi": + raise ValueError("SCGI URL must start with scgi://") + if not parsed.hostname or not parsed.port: + raise ValueError("SCGI URL must include host and port, e.g. 
class ScgiRtorrentClient:
    """XML-RPC over SCGI client for rTorrent network.scgi.open_port."""

    def __init__(self, url: str, timeout: int = 5):
        parsed = urlparse(url)
        if parsed.scheme != "scgi":
            raise ValueError("SCGI URL must start with scgi://")
        if not parsed.hostname or not parsed.port:
            raise ValueError("SCGI URL must include host and port, e.g. scgi://127.0.0.1:5000/RPC2")
        self.host = parsed.hostname
        self.port = parsed.port
        self.timeout = timeout
        # Default request path when the URL omits one.
        self.path = parsed.path or "/RPC2"

    def __getattr__(self, name: str):
        # Attribute access builds dotted method proxies: client.d.multicall2(...).
        return ScgiMethod(self, name)

    def call(self, method_name: str, *args):
        """Send one XML-RPC request over a fresh SCGI connection; return the decoded result."""
        body = dumps(args, methodname=method_name, allow_none=True).encode("utf-8")
        # SCGI requires CONTENT_LENGTH to be present and the literal "SCGI: 1" header.
        headers = {
            "CONTENT_LENGTH": str(len(body)),
            "SCGI": "1",
            "REQUEST_METHOD": "POST",
            "REQUEST_URI": self.path,
            "SCRIPT_NAME": self.path,
            "SERVER_PROTOCOL": "HTTP/1.1",
            "CONTENT_TYPE": "text/xml",
        }
        # netstring framing: b"<len>:<NUL-separated headers>,<xml body>"
        header_blob = b"".join(k.encode() + b"\0" + v.encode() + b"\0" for k, v in headers.items())
        payload = str(len(header_blob)).encode("ascii") + b":" + header_blob + b"," + body
        with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock:
            sock.settimeout(self.timeout)
            sock.sendall(payload)
            chunks: list[bytes] = []
            # rTorrent closes the connection after one response; read until EOF.
            while True:
                chunk = sock.recv(65536)
                if not chunk:
                    break
                chunks.append(chunk)
        response = b"".join(chunks)
        if not response:
            raise ConnectionError("Empty response from rTorrent SCGI")
        # Strip the HTTP-style response header block; servers may use CRLF or bare LF.
        if b"\r\n\r\n" in response:
            response = response.split(b"\r\n\r\n", 1)[1]
        elif b"\n\n" in response:
            response = response.split(b"\n\n", 1)[1]
        result, _ = loads(response)
        return result[0] if len(result) == 1 else result


def client_for(profile: dict) -> ScgiRtorrentClient:
    """Build an SCGI client from a stored profile row (scgi_url + timeout_seconds)."""
    return ScgiRtorrentClient(profile["scgi_url"], int(profile.get("timeout_seconds") or 5))


# Method names (including execute2.* variants) the connected rTorrent has
# rejected as "not defined"; cached so we stop retrying them.
_UNSUPPORTED_EXEC_METHODS: set[str] = set()

def _rt_execute(c: ScgiRtorrentClient, method: str, *args):
    """Run rTorrent execute.* as the rTorrent user across XML-RPC variants.

    Tries the given method and its execute2.* alias, each with and without a
    leading "" target argument, to cover old and new rTorrent API shapes.
    Raises RuntimeError listing every attempt when all variants fail.
    """
    method_names = [method]
    if method.startswith("execute."):
        execute2 = method.replace("execute.", "execute2.", 1)
        if execute2 not in _UNSUPPORTED_EXEC_METHODS:
            method_names.append(execute2)
    errors = []
    for method_name in method_names:
        # Newer APIs want a target ("" = global) as the first argument.
        for call_args in (("", *args), args):
            try:
                return c.call(method_name, *call_args)
            except Exception as exc:
                message = str(exc)
                if "not defined" in message.lower():
                    _UNSUPPORTED_EXEC_METHODS.add(method_name)
                preview = ", ".join(repr(x) for x in call_args[:3])
                if len(call_args) > 3:
                    preview += ", ..."
                errors.append(f"{method_name}({preview}): {exc}")
    raise RuntimeError("rTorrent execute failed: " + "; ".join(errors))
*call_args) + except Exception as exc: + message = str(exc) + if "not defined" in message.lower(): + _UNSUPPORTED_EXEC_METHODS.add(method_name) + preview = ", ".join(repr(x) for x in call_args[:3]) + if len(call_args) > 3: + preview += ", ..." + errors.append(f"{method_name}({preview}): {exc}") + raise RuntimeError("rTorrent execute failed: " + "; ".join(errors)) + + +def _is_rt_timeout_error(exc: Exception) -> bool: + return isinstance(exc, (TimeoutError, socket.timeout)) or "timed out" in str(exc).lower() + + +def _rt_execute_allow_timeout(c: ScgiRtorrentClient, method: str, *args): + try: + return _rt_execute(c, method, *args) + except Exception as exc: + if _is_rt_timeout_error(exc): + return None + raise + + +def _remote_clean_path(path: str) -> str: + path = str(path or "").strip() + return posixpath.normpath(path) if path else path + + +def _remote_join(*parts: str) -> str: + cleaned = [str(p).strip().rstrip("/") for p in parts if str(p).strip()] + return posixpath.normpath(posixpath.join(*cleaned)) if cleaned else "" + + +def _run_remote_move(c: ScgiRtorrentClient, src: str, dst: str, poll_interval: float = 2.0) -> None: + """Run a remote mv without binding the transfer time to the SCGI timeout.""" + token = uuid.uuid4().hex + status_path = f"/tmp/pytorrent-move-{token}.status" + start_script = ( + 'src=$1; dst=$2; status=$3; tmp=${status}.tmp; ' + 'rm -f "$status" "$tmp"; ' + '( ' + 'rc=0; ' + 'parent=${dst%/*}; ' + 'if [ -z "$dst" ] || [ "$dst" = "/" ]; then echo "unsafe destination: $dst" >&2; rc=5; fi; ' + 'if [ $rc -eq 0 ] && [ -n "$parent" ] && [ "$parent" != "$dst" ]; then mkdir -p "$parent" || rc=$?; fi; ' + 'if [ $rc -eq 0 ] && [ "$src" = "$dst" ]; then :; ' + 'elif [ $rc -eq 0 ] && { [ -e "$dst" ] || [ -L "$dst" ]; } && [ ! -e "$src" ] && [ ! -L "$src" ]; then :; ' + 'elif [ $rc -eq 0 ] && [ ! -e "$src" ] && [ ! 
def _run_remote_move(c: ScgiRtorrentClient, src: str, dst: str, poll_interval: float = 2.0) -> None:
    """Run a remote mv without binding the transfer time to the SCGI timeout.

    Protocol: start a detached shell job on the rTorrent host that performs
    the move and writes "OK" or "ERR <rc>" (plus captured stderr) to a unique
    status file under /tmp, then poll that file over SCGI until it appears.
    Raises RuntimeError with the captured error text on failure.
    """
    token = uuid.uuid4().hex
    status_path = f"/tmp/pytorrent-move-{token}.status"
    # Backgrounded subshell: validates dst, creates the parent dir, handles
    # the already-moved / source-missing / overwrite cases, then writes the
    # status file. Stdout/stderr of the subshell go to ${status}.tmp, which is
    # appended to the status file so error details survive.
    # NOTE(review): the trailing `rm -f "$tmp"` runs while the subshell's own
    # redirect to "$tmp" is still open — appears to work because the append
    # happens first, but confirm on the target shell.
    start_script = (
        'src=$1; dst=$2; status=$3; tmp=${status}.tmp; '
        'rm -f "$status" "$tmp"; '
        '( '
        'rc=0; '
        'parent=${dst%/*}; '
        'if [ -z "$dst" ] || [ "$dst" = "/" ]; then echo "unsafe destination: $dst" >&2; rc=5; fi; '
        'if [ $rc -eq 0 ] && [ -n "$parent" ] && [ "$parent" != "$dst" ]; then mkdir -p "$parent" || rc=$?; fi; '
        'if [ $rc -eq 0 ] && [ "$src" = "$dst" ]; then :; '
        'elif [ $rc -eq 0 ] && { [ -e "$dst" ] || [ -L "$dst" ]; } && [ ! -e "$src" ] && [ ! -L "$src" ]; then :; '
        'elif [ $rc -eq 0 ] && [ ! -e "$src" ] && [ ! -L "$src" ]; then echo "source missing: $src" >&2; rc=3; '
        'elif [ $rc -eq 0 ] && { [ -e "$dst" ] || [ -L "$dst" ]; }; then rm -rf -- "$dst" && mv -f -- "$src" "$dst" || rc=$?; '
        'elif [ $rc -eq 0 ]; then mv -f -- "$src" "$dst" || rc=$?; '
        'fi; '
        'if [ $rc -eq 0 ]; then printf "OK\n" > "$status"; '
        'else printf "ERR %s\n" "$rc" > "$status"; fi; '
        'if [ -s "$tmp" ]; then cat "$tmp" >> "$status"; fi; '
        'rm -f "$tmp" '
        ') > "$tmp" 2>&1 &'
    )
    poll_script = 'status=$1; [ -f "$status" ] && cat "$status" || true'
    cleanup_script = 'rm -f "$1"'

    # Starting the job may itself hit the SCGI timeout; that's fine — the
    # background mv keeps running and the poll loop below picks it up.
    _rt_execute_allow_timeout(c, "execute.throw", "sh", "-c", start_script, "pytorrent-move-start", src, dst, status_path)

    while True:
        time.sleep(max(0.25, poll_interval))
        try:
            output = str(_rt_execute(c, "execute.capture", "sh", "-c", poll_script, "pytorrent-move-poll", status_path) or "").strip()
        except Exception as exc:
            # Transient poll timeouts are retried; anything else is fatal.
            if _is_rt_timeout_error(exc):
                continue
            raise
        if not output:
            continue
        # Best-effort removal of the status file once we have a result.
        try:
            _rt_execute(c, "execute.throw", "sh", "-c", cleanup_script, "pytorrent-move-clean", status_path)
        except Exception:
            pass
        first_line = output.splitlines()[0].strip()
        if first_line == "OK":
            return
        if first_line.startswith("ERR"):
            details = "\n".join(output.splitlines()[1:]).strip()
            raise RuntimeError(details or first_line)
        raise RuntimeError(output)


def _torrent_data_path(c: ScgiRtorrentClient, torrent_hash: str) -> str:
    """Return data path as rTorrent sees it; do not touch pyTorrent local FS.

    Prefers d.base_path; falls back to d.directory (multi-file) or
    directory/name (single-file) when base_path is unavailable.
    """
    try:
        src = str(c.call("d.base_path", torrent_hash) or "").strip()
        if src:
            return src
    except Exception:
        pass
    directory = str(c.call("d.directory", torrent_hash) or "").strip()
    name = str(c.call("d.name", torrent_hash) or "").strip()
    try:
        is_multi = int(c.call("d.is_multi_file", torrent_hash) or 0)
    except Exception:
        is_multi = 0
    if is_multi:
        # Multi-file torrents: d.directory already includes the torrent root.
        return directory
    if directory and name:
        return _remote_join(directory, name)
    return directory
_safe_rm_rf_path(path: str) -> str: + path = _remote_clean_path(path) + if not path or path in {"/", "."}: + raise ValueError("Refusing to remove an unsafe data path") + if path.rstrip("/").count("/") < 1: + raise ValueError(f"Refusing to remove an unsafe data path: {path}") + return path + + +def _remove_torrent_data(c: ScgiRtorrentClient, torrent_hash: str) -> dict: + data_path = _safe_rm_rf_path(_torrent_data_path(c, torrent_hash)) + try: + c.call("d.stop", torrent_hash) + except Exception: + pass + try: + c.call("d.close", torrent_hash) + except Exception: + pass + _rt_execute(c, "execute.throw", "rm", "-rf", data_path) + return {"hash": torrent_hash, "removed_path": data_path} + + +def browse_path(profile: dict, path: str | None = None) -> dict: + """List directories through rTorrent execute.capture to avoid pyTorrent FS permissions.""" + c = client_for(profile) + base = _remote_clean_path(path or default_download_path(profile)) + script = ( + 'base=$1; ' + '[ -d "$base" ] || exit 2; ' + 'for p in "$base"/* "$base"/.[!.]* "$base"/..?*; do ' + '[ -d "$p" ] || continue; ' + 'name=${p##*/}; ' + 'printf "%s\t%s\n" "$name" "$p"; ' + 'done' + ) + output = _rt_execute(c, "execute.capture", "sh", "-c", script, "pytorrent-browse", base) + dirs = [] + for line in str(output or "").splitlines(): + if "\t" not in line: + continue + name, full_path = line.split("\t", 1) + if name not in {".", ".."}: + dirs.append({"name": name, "path": full_path}) + dirs.sort(key=lambda x: x["name"].lower()) + parent = posixpath.dirname(base.rstrip("/")) or "/" + if parent == base: + parent = base + return {"path": base, "parent": parent, "dirs": dirs[:300], "source": "rtorrent"} + + +TORRENT_FIELDS = [ + "d.hash=", "d.name=", "d.state=", "d.complete=", "d.size_bytes=", "d.completed_bytes=", + "d.ratio=", "d.up.rate=", "d.down.rate=", "d.up.total=", "d.down.total=", "d.peers_connected=", + "d.peers_complete=", "d.priority=", "d.directory=", "d.base_path=", "d.creation_date=", "d.custom1=", 
# Field list for d.multicall2; normalize_row() depends on this exact order.
TORRENT_FIELDS = [
    "d.hash=", "d.name=", "d.state=", "d.complete=", "d.size_bytes=", "d.completed_bytes=",
    "d.ratio=", "d.up.rate=", "d.down.rate=", "d.up.total=", "d.down.total=", "d.peers_connected=",
    "d.peers_complete=", "d.priority=", "d.directory=", "d.base_path=", "d.creation_date=", "d.custom1=",
    "d.custom=py_ratio_group", "d.message=", "d.hashing=", "d.is_active=", "d.is_multi_file=",
]


def normalize_row(row: list) -> dict:
    """Convert one d.multicall2 result row (ordered as TORRENT_FIELDS) to a UI dict.

    Index map: 0=hash 1=name 2=state 3=complete 4=size_bytes 5=completed_bytes
    6=ratio(permille) 7=up.rate 8=down.rate 9=up.total 10=down.total
    11=peers_connected 12=peers_complete 13=priority 14=directory 15=base_path
    16=creation_date 17=custom1(label) 18=custom:py_ratio_group 19=message
    20=hashing 21=is_active 22=is_multi_file.
    """
    size = int(row[4] or 0)
    completed = int(row[5] or 0)
    # Zero-byte torrents only show 100% once rTorrent flags them complete.
    progress = 100.0 if size <= 0 and int(row[3] or 0) else round((completed / size) * 100, 2) if size else 0.0
    ratio_raw = int(row[6] or 0)
    down_rate = int(row[8] or 0)
    up_rate = int(row[7] or 0)
    directory = str(row[14] or "")
    base_path = str(row[15] or "")
    is_multi_file = int(row[22] or 0) if len(row) > 22 else 0

    # Show the selected download location only. Hide the torrent root
    # directory for multi-file torrents and the filename for single-file
    # torrents. Data deletion still uses the full d.base_path elsewhere.
    if base_path and base_path != "/":
        display_parent = posixpath.dirname(base_path.rstrip("/")) or "/"
        display_path = display_parent.rstrip("/") + "/" if display_parent != "/" else display_parent
    elif directory and is_multi_file and directory != "/":
        display_parent = posixpath.dirname(directory.rstrip("/")) or "/"
        display_path = display_parent.rstrip("/") + "/" if display_parent != "/" else display_parent
    elif directory:
        display_path = directory.rstrip("/") + "/" if directory != "/" else directory
    else:
        display_path = ""
    msg = str(row[19] or "")
    msg_l = msg.lower()
    hashing = int(row[20] or 0) if len(row) > 20 else 0
    is_active = int(row[21] or 0) if len(row) > 21 else int(row[2] or 0)
    state = int(row[2] or 0)
    complete = int(row[3] or 0)
    # "Checking" is derived from the hashing flag or hash-check wording in d.message.
    is_checking = bool(hashing) or ("hash" in msg_l and ("check" in msg_l or "checking" in msg_l)) or "recheck" in msg_l
    # Paused = started (state) but not actively transferring and not checking.
    is_paused = bool(state) and not bool(is_active) and not is_checking
    status = "Checking" if is_checking else "Paused" if is_paused else "Seeding" if complete and state else "Downloading" if state else "Stopped"
    return {
        "hash": str(row[0] or ""),
        "name": str(row[1] or ""),
        "state": state,
        "active": is_active,
        "paused": is_paused,
        "complete": complete,
        "size": size,
        "size_h": human_size(size),
        "completed_bytes": completed,
        "progress": progress,
        # d.ratio is reported in permille; expose the conventional float.
        "ratio": round(ratio_raw / 1000, 3),
        "up_rate": up_rate,
        "up_rate_h": human_rate(up_rate),
        "down_rate": down_rate,
        "down_rate_h": human_rate(down_rate),
        "up_total": int(row[9] or 0),
        "up_total_h": human_size(row[9] or 0),
        "down_total": int(row[10] or 0),
        "down_total_h": human_size(row[10] or 0),
        "peers": int(row[11] or 0),
        "seeds": int(row[12] or 0),
        "priority": int(row[13] or 0),
        "path": display_path,
        "created": int(row[16] or 0),
        "label": str(row[17] or ""),
        "ratio_group": str(row[18] or ""),
        "message": msg,
        "status": status,
        "hashing": hashing,
    }


def list_torrents(profile: dict) -> list[dict]:
    """Fetch all torrents in the 'main' view, normalized for the UI."""
    rows = client_for(profile).d.multicall2("", "main", *TORRENT_FIELDS)
    return [normalize_row(list(row)) for row in rows]


# In-process TTL caches keyed by profile (and path for disk usage);
# tuples are (monotonic timestamp, cached payload).
_DISK_USAGE_CACHE: dict[str, tuple[float, dict]] = {}
_DISK_USAGE_TTL_SECONDS = 30.0
_REMOTE_USAGE_CACHE: dict[int, tuple[float, dict]] = {}
_REMOTE_USAGE_TTL_SECONDS = 60.0
_REMOTE_PUBLIC_IP_CACHE: dict[int, tuple[float, str]] = {}
_REMOTE_PUBLIC_IP_TTL_SECONDS = 6 * 60 * 60.0


def remote_public_ip(profile: dict, force: bool = False) -> str:
    """Resolve the rTorrent host's public IP via curl on that host (6h cache).

    Tries several echo-IP services in order; the sed filter keeps only
    characters valid in IPv4/IPv6 literals. Raises RuntimeError when every
    service fails.
    """
    profile_id = int(profile.get("id") or 0)
    now = time.monotonic()
    cached = _REMOTE_PUBLIC_IP_CACHE.get(profile_id)
    if cached and not force and now - cached[0] < _REMOTE_PUBLIC_IP_TTL_SECONDS:
        return cached[1]
    script = (
        'for url in https://ifconfig.co https://ifconfig.me https://ipapi.linuxiarz.pl http://ifconfig.co http://ifconfig.me; do '
        'ip=$(curl -fsS --max-time 8 "$url" 2>/dev/null | tr -d "\r" | head -n 1 | sed "s/[^0-9a-fA-F:.]//g"); '
        'if [ -n "$ip" ]; then printf "%s" "$ip"; exit 0; fi; '
        'done; exit 1'
    )
    value = str(_rt_execute(client_for(profile), "execute.capture", "sh", "-c", script) or "").strip()
    if not value:
        raise RuntimeError("Cannot read remote public IP")
    _REMOTE_PUBLIC_IP_CACHE[profile_id] = (now, value)
    return value
def remote_system_usage(profile: dict, force: bool = False) -> dict:
    """Sample CPU/RAM usage on the rTorrent host via /proc (60s cache).

    CPU% is derived from two /proc/stat snapshots one second apart; RAM%
    from MemTotal/MemAvailable in /proc/meminfo. The shell call therefore
    blocks for about one second when the cache is cold.
    """
    profile_id = int(profile.get("id") or 0)
    now = time.monotonic()
    cached = _REMOTE_USAGE_CACHE.get(profile_id)
    if cached and not force and now - cached[0] < _REMOTE_USAGE_TTL_SECONDS:
        usage = dict(cached[1])
        usage["cached"] = True
        return usage
    script = (
        'read cpu user nice system idle iowait irq softirq steal guest guest_nice < /proc/stat; '
        'total1=$((user+nice+system+idle+iowait+irq+softirq+steal)); idle1=$((idle+iowait)); '
        'sleep 1; '
        'read cpu user nice system idle iowait irq softirq steal guest guest_nice < /proc/stat; '
        'total2=$((user+nice+system+idle+iowait+irq+softirq+steal)); idle2=$((idle+iowait)); '
        'dt=$((total2-total1)); di=$((idle2-idle1)); '
        'cpu_pct=$(awk -v dt="$dt" -v di="$di" "BEGIN { if (dt > 0) printf \"%.1f\", (dt-di)*100/dt; else printf \"0.0\" }"); '
        "mem_total=$(awk '/^MemTotal:/ {print $2}' /proc/meminfo); "
        "mem_avail=$(awk '/^MemAvailable:/ {print $2}' /proc/meminfo); "
        'ram_pct=$(awk -v t="$mem_total" -v a="$mem_avail" "BEGIN { if (t > 0) printf \"%.1f\", (t-a)*100/t; else printf \"0.0\" }"); '
        'printf "%s %s" "$cpu_pct" "$ram_pct"'
    )
    output = str(_rt_execute(client_for(profile), "execute.capture", "sh", "-c", script) or "").strip()
    parts = output.split()
    if len(parts) < 2:
        raise RuntimeError(f"Cannot read remote CPU/RAM usage: {output}")
    usage = {"cpu": float(parts[0]), "ram": float(parts[1]), "source": "rtorrent-remote", "usage_source": "rtorrent-remote", "cached": False}
    _REMOTE_USAGE_CACHE[profile_id] = (now, usage)
    # Return a copy so callers cannot mutate the cached dict.
    return dict(usage)


def _statvfs_usage(path: str) -> dict:
    """Filesystem totals for `path` via os.statvfs, as a UI-ready dict.

    Uses f_bavail (space available to unprivileged users) for "free", so
    "used" includes the root-reserved blocks.
    """
    stat = os.statvfs(path)
    total = int(stat.f_blocks * stat.f_frsize)
    free = int(stat.f_bavail * stat.f_frsize)
    used = max(0, total - free)
    pct = round((used / total) * 100, 1) if total else 0.0
    return {
        "ok": True,
        "total": total,
        "used": used,
        "free": free,
        "total_h": human_size(total),
        "used_h": human_size(used),
        "free_h": human_size(free),
        "percent": pct,
    }


def disk_usage_for_default_path(profile: dict) -> dict:
    """Filesystem usage for the rTorrent default download directory.

    Cached for 30s per (profile, path). If statvfs fails on the path itself
    (e.g. it does not exist locally), walks up the ancestor chain and reports
    the first mount that answers, flagged with fallback=True.
    NOTE(review): this statvfs runs on the pyTorrent host, so for remote
    rTorrent profiles the numbers describe the local FS — confirm intended.
    """
    path = default_download_path(profile)
    cache_key = f"{profile.get('id')}:{path}"
    now = time.monotonic()
    cached = _DISK_USAGE_CACHE.get(cache_key)
    if cached and now - cached[0] < _DISK_USAGE_TTL_SECONDS:
        return cached[1]

    try:
        usage = _statvfs_usage(path)
        usage.update({"path": path, "source_path": path, "fallback": False})
    except Exception as first_exc:
        usage = {"ok": False, "path": path, "error": str(first_exc)}
        probe = os.path.abspath(path or os.sep)
        seen = set()
        while probe and probe not in seen:
            seen.add(probe)
            parent = os.path.dirname(probe)
            if parent == probe:
                break
            probe = parent
            try:
                usage = _statvfs_usage(probe)
                usage.update({"path": path, "source_path": probe, "fallback": True, "warning": str(first_exc)})
                break
            except Exception:
                continue
    _DISK_USAGE_CACHE[cache_key] = (now, usage)
    return usage


def system_status(profile: dict) -> dict:
    """Aggregate status for the bottom bar: counts, rates, limits, totals, disk."""
    c = client_for(profile)
    version = str(c.system.client_version())
    # Throttle getters can be missing on some builds; 0 means "unlimited".
    try:
        down_limit = int(c.throttle.global_down.max_rate())
    except Exception:
        down_limit = 0
    try:
        up_limit = int(c.throttle.global_up.max_rate())
    except Exception:
        up_limit = 0
    rows = list_torrents(profile)
    return {
        "ok": True,
        "version": version,
        "total": len(rows),
        "active": sum(1 for t in rows if t["state"]),
        "seeding": sum(1 for t in rows if t["complete"] and t["state"] and not t.get("paused")),
        "leeching": sum(1 for t in rows if not t["complete"] and t["state"] and not t.get("paused")),
        "paused": sum(1 for t in rows if t.get("paused")),
        "stopped": sum(1 for t in rows if not t["state"]),
        "down_rate": sum(t["down_rate"] for t in rows),
        "down_rate_h": human_rate(sum(t["down_rate"] for t in rows)),
        "up_rate": sum(t["up_rate"] for t in rows),
        "up_rate_h": human_rate(sum(t["up_rate"] for t in rows)),
        "down_limit": down_limit,
        "up_limit": up_limit,
        "down_limit_h": human_rate(down_limit) if down_limit else "∞",
        "up_limit_h": human_rate(up_limit) if up_limit else "∞",
        "total_down": sum(t["down_total"] for t in rows),
        "total_up": sum(t["up_total"] for t in rows),
        "total_down_h": human_size(sum(t["down_total"] for t in rows)),
        "total_up_h": human_size(sum(t["up_total"] for t in rows)),
        "disk": disk_usage_for_default_path(profile),
    }
def scgi_diagnostics(profile: dict) -> dict:
    """Time a hand-rolled system.client_version round trip over SCGI.

    Reimplements ScgiRtorrentClient.call inline so connect/send/first-byte/
    total latencies can be measured individually. Returns a metrics dict;
    raises on connection or empty-response failures like the real client.
    """
    c = client_for(profile)
    started = time.perf_counter()
    body = dumps((), methodname="system.client_version", allow_none=True).encode("utf-8")
    headers = {
        "CONTENT_LENGTH": str(len(body)),
        "SCGI": "1",
        "REQUEST_METHOD": "POST",
        "REQUEST_URI": c.path,
        "SCRIPT_NAME": c.path,
        "SERVER_PROTOCOL": "HTTP/1.1",
        "CONTENT_TYPE": "text/xml",
    }
    # Same netstring framing as ScgiRtorrentClient.call.
    header_blob = b"".join(k.encode() + b"\0" + v.encode() + b"\0" for k, v in headers.items())
    payload = str(len(header_blob)).encode("ascii") + b":" + header_blob + b"," + body
    metrics = {
        "url": profile.get("scgi_url"),
        "host": c.host,
        "port": c.port,
        "path": c.path,
        "timeout_seconds": c.timeout,
        "request_bytes": len(payload),
    }
    connect_started = time.perf_counter()
    with socket.create_connection((c.host, c.port), timeout=c.timeout) as sock:
        sock.settimeout(c.timeout)
        metrics["connect_ms"] = round((time.perf_counter() - connect_started) * 1000, 2)
        send_started = time.perf_counter()
        sock.sendall(payload)
        metrics["send_ms"] = round((time.perf_counter() - send_started) * 1000, 2)
        chunks: list[bytes] = []
        first_byte_at = None
        while True:
            chunk = sock.recv(65536)
            # Record when the first response byte arrives (relative to `started`).
            if chunk and first_byte_at is None:
                first_byte_at = time.perf_counter()
            if not chunk:
                break
            chunks.append(chunk)
    response = b"".join(chunks)
    metrics["response_bytes"] = len(response)
    metrics["first_byte_ms"] = round(((first_byte_at or time.perf_counter()) - started) * 1000, 2)
    metrics["total_ms"] = round((time.perf_counter() - started) * 1000, 2)
    if not response:
        raise ConnectionError("Empty response from rTorrent SCGI")
    xml_response = response
    # Strip the header block; servers may terminate with CRLF or bare LF.
    if b"\r\n\r\n" in xml_response:
        xml_response = xml_response.split(b"\r\n\r\n", 1)[1]
    elif b"\n\n" in xml_response:
        xml_response = xml_response.split(b"\n\n", 1)[1]
    result, _ = loads(xml_response)
    metrics["xml_bytes"] = len(xml_response)
    metrics["client_version"] = str(result[0]) if result else ""
    metrics["ok"] = True
    return metrics


def torrent_files(profile: dict, torrent_hash: str) -> list[dict]:
    """List a torrent's files with per-file progress (from completed chunks)."""
    rows = client_for(profile).f.multicall(torrent_hash, "", "f.path=", "f.size_bytes=", "f.completed_chunks=", "f.size_chunks=", "f.priority=")
    files = []
    for idx, r in enumerate(rows):
        size = int(r[1] or 0)
        completed_chunks = int(r[2] or 0)
        size_chunks = int(r[3] or 0)
        # Zero-byte files count as done; otherwise progress is chunk-based.
        progress = 100.0 if size <= 0 else round((completed_chunks / size_chunks) * 100, 2) if size_chunks else 0.0
        files.append({
            "index": idx,
            "path": r[0],
            "size": size,
            "size_h": human_size(size),
            "completed_chunks": completed_chunks,
            "size_chunks": size_chunks,
            # Clamp: a shared trailing chunk can push the ratio past 100%.
            "progress": min(100.0, max(0.0, progress)),
            "priority": int(r[4] or 0),
        })
    return files
def set_file_priorities(profile: dict, torrent_hash: str, files: list[dict]) -> dict:
    """Set per-file priorities (0=skip, 1=normal, 2=high) on one torrent.

    Invalid entries are collected into the result, not raised. Tries the
    modern f.priority.set target form first, then the legacy f.set_priority
    signature. Returns {"updated": [...], "errors": [...]}.
    """
    c = client_for(profile)
    updated: list[dict] = []
    errors: list[dict] = []
    valid_priorities = {0, 1, 2}
    for item in files or []:
        try:
            index = int(item.get("index"))
            priority = int(item.get("priority"))
        except Exception:
            errors.append({"item": item, "error": "Invalid file index or priority"})
            continue
        if index < 0 or priority not in valid_priorities:
            errors.append({"index": index, "priority": priority, "error": "Priority must be 0, 1 or 2"})
            continue
        # Modern API addresses a file as "<hash>:f<index>".
        file_target = f"{torrent_hash}:f{index}"
        try:
            c.call("f.priority.set", file_target, priority)
            updated.append({"index": index, "priority": priority})
            continue
        except Exception as first_exc:
            try:
                c.call("f.set_priority", torrent_hash, index, priority)
                updated.append({"index": index, "priority": priority})
                continue
            except Exception as second_exc:
                errors.append({"index": index, "priority": priority, "error": f"{first_exc}; fallback: {second_exc}"})
    if updated:
        # Ask rTorrent to re-evaluate wanted chunks after priority changes;
        # best effort — not all builds expose d.update_priorities.
        try:
            c.call("d.update_priorities", torrent_hash)
        except Exception:
            pass
    return {"updated": updated, "errors": errors}

def torrent_peers(profile: dict, torrent_hash: str) -> list[dict]:
    """List connected peers; falls back to a reduced field set on older builds."""
    fields = [
        "p.address=", "p.client_version=", "p.completed_percent=", "p.down_rate=",
        "p.up_rate=", "p.port=", "p.is_encrypted=", "p.is_incoming=",
        "p.is_snubbed=", "p.is_banned=",
    ]
    try:
        rows = client_for(profile).p.multicall(torrent_hash, "", *fields)
    except Exception:
        # Older rTorrent lacks is_incoming/is_snubbed/is_banned getters.
        fields = ["p.address=", "p.client_version=", "p.completed_percent=", "p.down_rate=", "p.up_rate=", "p.port=", "p.is_encrypted="]
        rows = client_for(profile).p.multicall(torrent_hash, "", *fields)
    peers = []
    for idx, r in enumerate(rows):
        peers.append({
            "index": idx,
            "ip": r[0],
            "client": r[1],
            "completed": int(r[2] or 0),
            "down_rate": int(r[3] or 0),
            "down_rate_h": human_rate(r[3] or 0),
            "up_rate": int(r[4] or 0),
            "up_rate_h": human_rate(r[4] or 0),
            "port": int(r[5] or 0),
            # Flags default to False when the reduced field set was used.
            "encrypted": bool(r[6]) if len(r) > 6 else False,
            "incoming": bool(r[7]) if len(r) > 7 else False,
            "snubbed": bool(r[8]) if len(r) > 8 else False,
            "banned": bool(r[9]) if len(r) > 9 else False,
        })
    return peers
{action_name}") + c = client_for(profile) + target = f"{torrent_hash}:p{int(peer_index)}" + errors = [] + for method in candidates: + try: + c.call(method, target) + return {"ok": True, "action": action_name, "method": method, "peer_index": peer_index} + except Exception as exc: + errors.append(f"{method}: {exc}") + raise RuntimeError("; ".join(errors)) + + + + +def _call_first(c: ScgiRtorrentClient, candidates: list[tuple[str, tuple]]) -> dict: + errors = [] + for method, args in candidates: + try: + result = c.call(method, *args) + return {"ok": True, "method": method, "result": result} + except Exception as exc: + errors.append(f"{method}: {exc}") + raise RuntimeError("; ".join(errors)) + + +def _safe_tracker_call(c: ScgiRtorrentClient, method: str, target: str, default=None): + try: + return c.call(method, target) + except Exception: + return default + + +def _tracker_target(torrent_hash: str, index: int) -> str: + return f"{torrent_hash}:t{int(index)}" + +def _tracker_int(value, default=None): + try: + if value is None or value == "": + return default + return int(value) + except Exception: + return default + + +def torrent_trackers(profile: dict, torrent_hash: str) -> list[dict]: + c = client_for(profile) + rows = c.t.multicall(torrent_hash, "", "t.url=", "t.is_enabled=", "t.scrape_complete=", "t.scrape_incomplete=", "t.scrape_downloaded=") + trackers = [] + for idx, r in enumerate(rows): + target = _tracker_target(torrent_hash, idx) + last_announce = _safe_tracker_call(c, "t.activity_time_last", target, 0) + scrape_time = _safe_tracker_call(c, "t.scrape_time_last", target, 0) + if not last_announce: + last_announce = scrape_time + next_announce = _safe_tracker_call(c, "t.activity_time_next", target, 0) + raw_seeds = _tracker_int(r[2], None) + raw_peers = _tracker_int(r[3], None) + raw_downloaded = _tracker_int(r[4], None) + has_scrape = bool(_tracker_int(scrape_time, 0)) or raw_seeds not in (None, 0) or raw_peers not in (None, 0) or raw_downloaded not in 
def torrent_trackers(profile: dict, torrent_hash: str) -> list[dict]:
    """List trackers with scrape statistics for one torrent.

    Scrape counts are reported as None until there is evidence of a scrape
    (non-zero scrape timestamp or any non-zero counter), so the UI can show
    "—" instead of misleading zeros.
    """
    c = client_for(profile)
    rows = c.t.multicall(torrent_hash, "", "t.url=", "t.is_enabled=", "t.scrape_complete=", "t.scrape_incomplete=", "t.scrape_downloaded=")
    trackers = []
    for idx, r in enumerate(rows):
        target = _tracker_target(torrent_hash, idx)
        last_announce = _safe_tracker_call(c, "t.activity_time_last", target, 0)
        scrape_time = _safe_tracker_call(c, "t.scrape_time_last", target, 0)
        # Fall back to the scrape timestamp when no announce time is known.
        if not last_announce:
            last_announce = scrape_time
        next_announce = _safe_tracker_call(c, "t.activity_time_next", target, 0)
        raw_seeds = _tracker_int(r[2], None)
        raw_peers = _tracker_int(r[3], None)
        raw_downloaded = _tracker_int(r[4], None)
        has_scrape = bool(_tracker_int(scrape_time, 0)) or raw_seeds not in (None, 0) or raw_peers not in (None, 0) or raw_downloaded not in (None, 0)
        trackers.append({
            "index": idx,
            "url": str(r[0] or ""),
            "enabled": bool(r[1]),
            "seeds": raw_seeds if has_scrape else None,
            "peers": raw_peers if has_scrape else None,
            "downloaded": raw_downloaded if has_scrape else None,
            "has_scrape": has_scrape,
            "last_announce": int(last_announce or 0),
            "next_announce": int(next_announce or 0),
        })
    return trackers

def tracker_action(profile: dict, torrent_hash: str, action_name: str, payload: dict | None = None) -> dict:
    """Dispatch a tracker-level action (reannounce/add/edit) across API variants.

    Each action tries several call signatures via _call_first because the
    argument shapes differ between rTorrent/XML-RPC versions.
    Raises ValueError for unknown actions or missing payload fields.
    """
    payload = payload or {}
    c = client_for(profile)
    if action_name == "reannounce":
        return _call_first(c, [
            ("d.tracker_announce", (torrent_hash,)),
            ("d.tracker_announce", ("", torrent_hash)),
            ("d.tracker_announce.force", (torrent_hash,)),
        ])
    if action_name == "add":
        url = str(payload.get("url") or "").strip()
        if not url:
            raise ValueError("Missing tracker URL")
        return _call_first(c, [
            ("d.tracker.insert", (torrent_hash, "", url)),
            ("d.tracker.insert", (torrent_hash, 0, url)),
            ("d.tracker.insert", ("", torrent_hash, "", url)),
        ])
    if action_name == "edit":
        url = str(payload.get("url") or "").strip()
        index = int(payload.get("index", -1))
        if index < 0:
            raise ValueError("Invalid tracker index")
        if not url:
            raise ValueError("Missing tracker URL")
        target = _tracker_target(torrent_hash, index)
        return _call_first(c, [
            ("t.url.set", (target, url)),
            ("t.url.set", ("", target, url)),
        ])
    raise ValueError(f"Unknown tracker action: {action_name}")


# Catalog of rTorrent options exposed on the settings page.
# "key" is the XML-RPC getter name (".set" is appended when writing);
# "type" drives normalization in _normalize_config_value; "readonly"
# entries are displayed but never written.
RTORRENT_CONFIG_FIELDS = [
    {"group": "Directories", "key": "directory.default", "label": "Default download directory", "type": "text"},
    {"group": "Directories", "key": "session.path", "label": "Session path", "type": "text"},
    {"group": "Directories", "key": "system.cwd", "label": "Working directory", "type": "text", "readonly": True},
    {"group": "Network", "key": "network.port_range", "label": "Incoming port range", "type": "text", "placeholder": "49164-49164"},
    {"group": "Network", "key": "network.port_random", "label": "Random incoming port", "type": "bool"},
    {"group": "Network", "key": "network.bind_address", "label": "Bind address", "type": "text", "placeholder": "0.0.0.0"},
    {"group": "Network", "key": "network.local_address", "label": "Local address", "type": "text"},
    {"group": "Network", "key": "network.max_open_files", "label": "Max open files", "type": "number"},
    {"group": "Network", "key": "network.max_open_sockets", "label": "Max open sockets", "type": "number"},
    {"group": "Network", "key": "network.http.max_open", "label": "Max HTTP connections", "type": "number"},
    {"group": "Network", "key": "network.http.ssl_verify_peer", "label": "Verify SSL peers", "type": "bool"},
    {"group": "Peers", "key": "throttle.min_peers.normal", "label": "Min peers downloading", "type": "number"},
    {"group": "Peers", "key": "throttle.max_peers.normal", "label": "Max peers downloading", "type": "number"},
    {"group": "Peers", "key": "throttle.min_peers.seed", "label": "Min peers seeding", "type": "number"},
    {"group": "Peers", "key": "throttle.max_peers.seed", "label": "Max peers seeding", "type": "number"},
    {"group": "Peers", "key": "trackers.numwant", "label": "Tracker numwant", "type": "number"},
    {"group": "Throttle", "key": "throttle.global_down.max_rate", "label": "Global download limit B/s", "type": "number"},
    {"group": "Throttle", "key": "throttle.global_up.max_rate", "label": "Global upload limit B/s", "type": "number"},
    {"group": "Throttle", "key": "throttle.max_downloads.global", "label": "Max active downloads", "type": "number"},
    {"group": "Throttle", "key": "throttle.max_uploads.global", "label": "Max active uploads", "type": "number"},
    {"group": "Throttle", "key": "throttle.max_downloads.div", "label": "Max downloads per throttle", "type": "number"},
    {"group": "Throttle", "key": "throttle.max_uploads.div", "label": "Max uploads per throttle", "type": "number"},
    {"group": "DHT / PEX", "key": "dht.mode", "label": "DHT mode", "type": "text", "placeholder": "disable/off/auto/on"},
    {"group": "DHT / PEX", "key": "dht.port", "label": "DHT port", "type": "number"},
    {"group": "DHT / PEX", "key": "protocol.pex", "label": "Peer exchange", "type": "bool"},
    {"group": "Protocol", "key": "protocol.encryption.set", "label": "Encryption flags", "type": "text", "placeholder": "allow_incoming,try_outgoing,enable_retry"},
    {"group": "Protocol", "key": "protocol.connection.leech", "label": "Leech connection type", "type": "text", "placeholder": "leech"},
    {"group": "Protocol", "key": "protocol.connection.seed", "label": "Seed connection type", "type": "text", "placeholder": "seed"},
    {"group": "Files", "key": "pieces.hash.on_completion", "label": "Hash check on completion", "type": "bool"},
    {"group": "Files", "key": "pieces.preload.type", "label": "Pieces preload type", "type": "number"},
    {"group": "Files", "key": "pieces.preload.min_size", "label": "Pieces preload min size", "type": "number"},
    {"group": "Files", "key": "pieces.preload.min_rate", "label": "Pieces preload min rate", "type": "number"},
    {"group": "Files", "key": "system.file.allocate", "label": "File allocation", "type": "number"},
    {"group": "Files", "key": "system.file.max_size", "label": "Max file size", "type": "number"},
    {"group": "System", "key": "system.umask", "label": "File umask", "type": "text", "placeholder": "0002"},
    {"group": "System", "key": "system.hostname", "label": "Hostname", "type": "text", "readonly": True},
    {"group": "System", "key": "system.client_version", "label": "Client version", "type": "text", "readonly": True},
    {"group": "System", "key": "system.library_version", "label": "Library version", "type": "text", "readonly": True},
]
str(int(value or 0)) + return str(value or "").strip() + + +def saved_config_overrides(profile_id: int, user_id: int | None = None) -> dict[str, dict]: + user_id = user_id or default_user_id() + with connect() as conn: + rows = conn.execute( + "SELECT key,value,baseline_value,apply_on_start,updated_at FROM rtorrent_config_overrides WHERE user_id=? AND profile_id=?", + (user_id, int(profile_id)), + ).fetchall() + return {r["key"]: r for r in rows} + + +def get_config(profile: dict) -> dict: + c = client_for(profile) + saved = saved_config_overrides(int(profile["id"])) + fields = [] + for meta in RTORRENT_CONFIG_FIELDS: + item = dict(meta) + saved_item = saved.get(meta["key"]) + try: + item["value"] = _normalize_config_value(meta, c.call(meta["key"])) + item["current_value"] = item["value"] + item["ok"] = True + except Exception as exc: + item["value"] = "" + item["current_value"] = "" + item["ok"] = False + item["error"] = str(exc) + if saved_item: + saved_value = _normalize_config_value(meta, saved_item.get("value")) + baseline_raw = saved_item.get("baseline_value") + if baseline_raw not in (None, ""): + baseline_value = _normalize_config_value(meta, baseline_raw) + else: + baseline_value = _normalize_config_value(meta, item.get("current_value")) + item["saved"] = True + item["saved_value"] = saved_value + item["baseline_value"] = baseline_value + item["apply_on_start"] = bool(saved_item.get("apply_on_start")) + item["changed"] = saved_value != baseline_value + fields.append(item) + return {"fields": fields, "apply_on_start": any(bool(v.get("apply_on_start")) for v in saved.values())} + + + +def default_download_path(profile: dict) -> str: + """Return rTorrent default download directory for the active profile.""" + c = client_for(profile) + errors = [] + for method in ("directory.default", "system.cwd"): + try: + value = str(c.call(method) or "").strip() + if value: + return value + except Exception as exc: + errors.append(f"{method}: {exc}") + raise 
def default_download_path(profile: dict) -> str:
    """Return rTorrent default download directory for the active profile.

    Prefers directory.default and falls back to the daemon's working
    directory; raises RuntimeError listing both failures when neither works.
    """
    c = client_for(profile)
    errors = []
    for method in ("directory.default", "system.cwd"):
        try:
            value = str(c.call(method) or "").strip()
            if value:
                return value
        except Exception as exc:
            errors.append(f"{method}: {exc}")
    raise RuntimeError("Cannot read rTorrent default download directory: " + "; ".join(errors))

def generate_config_text(values: dict) -> str:
    """Render known, writable option values as .rtorrent.rc 'key.set = value' lines.

    Unknown and readonly keys are skipped. Returns "" when nothing is
    writable; otherwise the text ends with a trailing newline.
    """
    known = {f["key"]: f for f in RTORRENT_CONFIG_FIELDS}
    lines = []
    for key, value in (values or {}).items():
        meta = known.get(key)
        if not meta or meta.get("readonly"):
            continue
        normalized = _normalize_config_value(meta, value)
        # Quote text values containing whitespace; escape backslashes first
        # so embedded quotes survive the rc-file parser.
        if meta.get("type") == "text" and any(ch.isspace() for ch in normalized):
            normalized = '"' + normalized.replace('\\', '\\\\').replace('"', '\\"') + '"'
        lines.append(f"{key}.set = {normalized}")
    return "\n".join(lines) + ("\n" if lines else "")


def _read_rtorrent_config_value(client, key: str, meta: dict) -> str:
    """Read one live option value from rTorrent, normalized for comparison."""
    return _normalize_config_value(meta, client.call(key))


def store_config_overrides(profile: dict, values: dict, apply_on_start: bool, baseline_values: dict | None = None, clear_keys: list[str] | None = None) -> list[str]:
    """Persist config overrides for a profile; returns the list of stored keys.

    `clear_keys` removes existing overrides outright. A value equal to its
    baseline also deletes the override (nothing is actually overridden).
    The apply_on_start flag is then synced across all of the profile's rows.
    """
    known = {f["key"]: f for f in RTORRENT_CONFIG_FIELDS}
    user_id = default_user_id()
    now = utcnow()
    profile_id = int(profile["id"])
    baseline_values = baseline_values or {}
    clear_set = set(clear_keys or [])
    stored = []
    with connect() as conn:
        for key in clear_set:
            if key in known:
                conn.execute(
                    "DELETE FROM rtorrent_config_overrides WHERE user_id=? AND profile_id=? AND key=?",
                    (user_id, profile_id, key),
                )
        for key, value in (values or {}).items():
            if key in clear_set:
                continue
            meta = known.get(key)
            if not meta or meta.get("readonly"):
                continue
            normalized = _normalize_config_value(meta, value)
            existing = conn.execute(
                "SELECT baseline_value FROM rtorrent_config_overrides WHERE user_id=? AND profile_id=? AND key=?",
                (user_id, profile_id, key),
            ).fetchone()
            existing_baseline = existing.get("baseline_value") if existing else None

            # Keep the first reference value forever until the override is cleared.
            # Without this, a second save could treat already-overridden rTorrent
            # values as the new baseline and the UI would stop marking them as changed.
            if existing_baseline not in (None, ""):
                baseline = _normalize_config_value(meta, existing_baseline)
            else:
                baseline = _normalize_config_value(meta, baseline_values.get(key)) if key in baseline_values else None

            if baseline not in (None, "") and normalized == baseline:
                # Value equals baseline: no override needed; drop any stale row.
                conn.execute(
                    "DELETE FROM rtorrent_config_overrides WHERE user_id=? AND profile_id=? AND key=?",
                    (user_id, profile_id, key),
                )
                continue
            conn.execute(
                "INSERT OR REPLACE INTO rtorrent_config_overrides(user_id,profile_id,key,value,baseline_value,apply_on_start,updated_at) VALUES(?,?,?,?,?,?,?)",
                (user_id, profile_id, key, normalized, baseline, 1 if apply_on_start else 0, now),
            )
            stored.append(key)
        # apply_on_start is a per-profile setting: keep all rows in agreement.
        conn.execute(
            "UPDATE rtorrent_config_overrides SET apply_on_start=?, updated_at=? WHERE user_id=? AND profile_id=?",
            (1 if apply_on_start else 0, now, user_id, profile_id),
        )
    return stored
def apply_startup_overrides(profile: dict) -> dict:
    """Push all overrides flagged ``apply_on_start`` to the running rTorrent."""
    rows = saved_config_overrides(int(profile["id"]))
    values = {k: v.get("value") for k, v in rows.items() if v.get("apply_on_start")}
    if not values:
        return {"ok": True, "updated": [], "errors": [], "skipped": True}
    return set_config(profile, values, apply_now=True, apply_on_start=True)


def action(profile: dict, torrent_hashes: list[str], name: str, payload: dict | None = None) -> dict:
    """Run a bulk torrent action over SCGI.

    Supports the simple ``d.*`` commands plus the richer ``set_label``,
    ``set_ratio_group``, ``move`` (optionally with physical data move and
    recheck) and ``remove`` (optionally deleting payload data) operations.

    Raises ValueError for an unknown action name or a missing/underivable path.
    """
    payload = payload or {}
    c = client_for(profile)
    methods = {
        "start": "d.start",
        "pause": "d.pause",
        "stop": "d.stop",
        "resume": "d.resume",
        "recheck": "d.check_hash",
        "reannounce": "d.tracker_announce",
        "remove": "d.erase",
    }
    if name == "set_label":
        label = str(payload.get("label") or "").strip()
        for h in torrent_hashes:
            c.call("d.custom1.set", h, label)
        return {"ok": True, "count": len(torrent_hashes), "label": label}
    if name == "set_ratio_group":
        group = str(payload.get("ratio_group") or "").strip()
        for h in torrent_hashes:
            c.call("d.custom.set", h, "py_ratio_group", group)
        return {"ok": True, "count": len(torrent_hashes), "ratio_group": group}
    if name == "move":
        path = _remote_clean_path(payload.get("path") or "")
        move_data = bool(payload.get("move_data"))
        # Physical moves default to a recheck afterwards.
        recheck = bool(payload.get("recheck", move_data))
        if not path:
            raise ValueError("Missing path")
        results = []
        if move_data:
            # mkdir timeouts are tolerated: the directory may still appear remotely.
            _rt_execute_allow_timeout(c, "execute.throw", "mkdir", "-p", path)
        for h in torrent_hashes:
            item = {"hash": h, "path": path, "move_data": move_data}
            try:
                was_state = int(c.call("d.state", h) or 0)
            except Exception:
                was_state = 0
            try:
                was_active = int(c.call("d.is_active", h) or 0)
            except Exception:
                was_active = was_state
            if move_data:
                src = _remote_clean_path(_torrent_data_path(c, h))
                if not src:
                    raise ValueError(f"Cannot determine source path for {h}")
                dst = _remote_join(path, posixpath.basename(src.rstrip("/")))
                if src != dst:
                    # Stop and close before the physical move; failures here are
                    # best-effort because the torrent may already be inactive.
                    try:
                        c.call("d.stop", h)
                    except Exception:
                        pass
                    try:
                        c.call("d.close", h)
                    except Exception:
                        pass
                    _run_remote_move(c, src, dst)
                    item["moved_from"] = src
                    item["moved_to"] = dst
                else:
                    item["skipped"] = "source and destination are the same"
                c.call("d.directory.set", h, path)
                if recheck:
                    try:
                        c.call("d.check_hash", h)
                    except Exception as exc:
                        item["recheck_error"] = str(exc)
                if was_state or was_active:
                    # Restore the torrent's prior running state after the move.
                    try:
                        c.call("d.start", h)
                    except Exception:
                        pass
            else:
                c.call("d.directory.set", h, path)
            results.append(item)
        return {"ok": True, "count": len(torrent_hashes), "move_data": move_data, "results": results}
    method = methods.get(name)
    if not method:
        raise ValueError(f"Unknown action: {name}")
    remove_data = bool(payload.get("remove_data")) if name == "remove" else False
    results = []
    for h in torrent_hashes:
        if remove_data:
            # Delete payload files before d.erase drops the path information.
            results.append(_remove_torrent_data(c, h))
        c.call(method, h)
    return {"ok": True, "count": len(torrent_hashes), "remove_data": remove_data, "results": results}


def add_magnet(profile: dict, uri: str, start: bool = True, directory: str = "", label: str = "") -> dict:
    """Queue a magnet link, optionally pre-setting directory and label."""
    c = client_for(profile)
    commands = []
    if directory:
        commands.append(f"d.directory.set={directory}")
    if label:
        commands.append(f"d.custom1.set={label}")
    if start:
        c.load.start_verbose("", uri, *commands)
    else:
        c.load.normal("", uri, *commands)
    return {"ok": True}


def set_limits(profile: dict, down: int | None, up: int | None):
    """Set global speed limits in bytes/s.

    rTorrent XML-RPC setters need an empty target string as the first
    argument. Without it rTorrent returns: target must be a string.
    """
    c = client_for(profile)
    if down is not None:
        c.call("throttle.global_down.max_rate.set", "", int(down))
    if up is not None:
        c.call("throttle.global_up.max_rate.set", "", int(up))
    return {"ok": True, "down": int(down or 0), "up": int(up or 0)}


def add_torrent_raw(profile: dict, data: bytes, start: bool = True, directory: str = "", label: str = "") -> dict:
    """Upload raw .torrent bytes via load.raw/load.raw_start."""
    c = client_for(profile)
    commands = []
    if directory:
        commands.append(f"d.directory.set={directory}")
    if label:
        commands.append(f"d.custom1.set={label}")
    method = "load.raw_start" if start else "load.raw"
    c.call(method, "", Binary(data), *commands)
    return {"ok": True}


# --- pytorrent/services/smart_queue.py ---

from __future__ import annotations

from datetime import datetime, timezone
from typing import Any
import json
import time

from ..config import SMART_QUEUE_LABEL
from ..db import connect, default_user_id, utcnow
from . import rtorrent
from .preferences import active_profile, get_profile


def _ts(value: str | None) -> float:
    """Parse an ISO timestamp (Z suffix allowed) into epoch seconds; 0.0 on failure."""
    if not value:
        return 0.0
    try:
        return datetime.fromisoformat(value.replace('Z', '+00:00')).timestamp()
    except Exception:
        return 0.0


def _default_settings(user_id: int, profile_id: int) -> dict[str, Any]:
    """In-memory defaults used when a profile has no saved Smart Queue row."""
    return {
        'user_id': user_id,
        'profile_id': profile_id,
        'enabled': 0,
        'max_active_downloads': 5,
        'stalled_seconds': 300,
        'min_speed_bytes': 1024,
        'min_seeds': 1,
        'updated_at': utcnow(),
    }


def get_settings(profile_id: int, user_id: int | None = None) -> dict[str, Any]:
    """Return Smart Queue settings for a profile, falling back to defaults."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        row = conn.execute(
            'SELECT * FROM smart_queue_settings WHERE user_id=? AND profile_id=?',
            (user_id, profile_id),
        ).fetchone()
    return row or _default_settings(user_id, profile_id)
def _coerce_setting_int(data: dict[str, Any], current: dict[str, Any], key: str, default: int) -> int:
    """Read an integer setting from *data*, falling back to *current*, then *default*.

    Unlike an ``or`` chain this treats an explicit 0 as a legitimate submitted
    value, so thresholds such as min_speed_bytes can actually be set to zero.
    """
    raw = data.get(key)
    if raw is None or raw == '':
        raw = current.get(key)
    if raw is None or raw == '':
        raw = default
    return int(raw)


def save_settings(profile_id: int, data: dict[str, Any], user_id: int | None = None) -> dict[str, Any]:
    """Validate and upsert Smart Queue settings; return the stored row.

    Bug fix vs the previous version: numeric fields used
    ``int(data.get(key) or current.get(key) or default)``, so submitting an
    explicit 0 (e.g. ``min_speed_bytes=0`` or ``min_seeds=0``) silently kept
    the old stored value. Zero is now accepted; lower bounds (1 active
    download, 30 s stall window, non-negative thresholds) still apply.
    """
    user_id = user_id or default_user_id()
    current = get_settings(profile_id, user_id)
    settings = {
        'enabled': 1 if data.get('enabled', current.get('enabled')) else 0,
        'max_active_downloads': max(1, _coerce_setting_int(data, current, 'max_active_downloads', 5)),
        'stalled_seconds': max(30, _coerce_setting_int(data, current, 'stalled_seconds', 300)),
        'min_speed_bytes': max(0, _coerce_setting_int(data, current, 'min_speed_bytes', 0)),
        'min_seeds': max(0, _coerce_setting_int(data, current, 'min_seeds', 0)),
    }
    now = utcnow()
    with connect() as conn:
        conn.execute(
            '''INSERT INTO smart_queue_settings(user_id,profile_id,enabled,max_active_downloads,stalled_seconds,min_speed_bytes,min_seeds,updated_at)
               VALUES(?,?,?,?,?,?,?,?)
               ON CONFLICT(user_id, profile_id) DO UPDATE SET
                   enabled=excluded.enabled,
                   max_active_downloads=excluded.max_active_downloads,
                   stalled_seconds=excluded.stalled_seconds,
                   min_speed_bytes=excluded.min_speed_bytes,
                   min_seeds=excluded.min_seeds,
                   updated_at=excluded.updated_at''',
            (user_id, profile_id, settings['enabled'], settings['max_active_downloads'], settings['stalled_seconds'], settings['min_speed_bytes'], settings['min_seeds'], now),
        )
    return get_settings(profile_id, user_id)


def list_exclusions(profile_id: int, user_id: int | None = None) -> list[dict[str, Any]]:
    """Return torrents excluded from Smart Queue management, newest first."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        return conn.execute(
            'SELECT * FROM smart_queue_exclusions WHERE user_id=? AND profile_id=? ORDER BY created_at DESC',
            (user_id, profile_id),
        ).fetchall()
def set_exclusion(profile_id: int, torrent_hash: str, excluded: bool, reason: str = '', user_id: int | None = None) -> None:
    """Add or remove one torrent from the Smart Queue exclusion list."""
    user_id = user_id or default_user_id()
    now = utcnow()
    with connect() as conn:
        if excluded:
            conn.execute(
                'INSERT OR REPLACE INTO smart_queue_exclusions(user_id,profile_id,torrent_hash,reason,created_at) VALUES(?,?,?,?,?)',
                (user_id, profile_id, torrent_hash, reason, now),
            )
        else:
            conn.execute(
                'DELETE FROM smart_queue_exclusions WHERE user_id=? AND profile_id=? AND torrent_hash=?',
                (user_id, profile_id, torrent_hash),
            )


def add_history(profile_id: int, event: str, paused: list[str] | None = None, resumed: list[str] | None = None, checked: int = 0, details: dict[str, Any] | None = None, user_id: int | None = None) -> None:
    """Append one Smart Queue run to the history table.

    The paused/resumed hash lists are folded into ``details_json``; only their
    counts are stored as dedicated columns.
    """
    user_id = user_id or default_user_id()
    paused = paused or []
    resumed = resumed or []
    details = details or {}
    with connect() as conn:
        conn.execute(
            'INSERT INTO smart_queue_history(user_id,profile_id,event,paused_count,resumed_count,checked_count,details_json,created_at) VALUES(?,?,?,?,?,?,?,?)',
            (user_id, profile_id, event, len(paused), len(resumed), int(checked or 0), json.dumps({**details, 'paused': paused, 'resumed': resumed}), utcnow()),
        )


def list_history(profile_id: int, user_id: int | None = None, limit: int = 30) -> list[dict[str, Any]]:
    """Return recent Smart Queue history rows, newest first (limit clamped to 1..100)."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        return conn.execute(
            'SELECT * FROM smart_queue_history WHERE user_id=? AND profile_id=? ORDER BY created_at DESC LIMIT ?',
            (user_id, profile_id, max(1, min(int(limit or 30), 100))),
        ).fetchall()


def count_history(profile_id: int, user_id: int | None = None) -> int:
    """Total number of stored Smart Queue history rows for a profile."""
    user_id = user_id or default_user_id()
    with connect() as conn:
        row = conn.execute(
            'SELECT COUNT(*) AS count FROM smart_queue_history WHERE user_id=? AND profile_id=?',
            (user_id, profile_id),
        ).fetchone()
    return int((row or {}).get('count') or 0)
def _excluded_hashes(profile_id: int, user_id: int) -> set[str]:
    """Hashes the user excluded from Smart Queue management."""
    return {r['torrent_hash'] for r in list_exclusions(profile_id, user_id)}


def _remember_auto_label(profile_id: int, torrent_hash: str, previous_label: str) -> None:
    """Record the label a torrent carried before Smart Queue overwrote it.

    If a row already exists only its timestamp is refreshed: the first
    remembered label wins, so repeated auto-pauses never clobber the user's
    original label.
    """
    now = utcnow()
    with connect() as conn:
        row = conn.execute(
            'SELECT previous_label FROM smart_queue_auto_labels WHERE profile_id=? AND torrent_hash=?',
            (profile_id, torrent_hash),
        ).fetchone()
        if row:
            conn.execute(
                'UPDATE smart_queue_auto_labels SET updated_at=? WHERE profile_id=? AND torrent_hash=?',
                (now, profile_id, torrent_hash),
            )
        else:
            conn.execute(
                'INSERT INTO smart_queue_auto_labels(profile_id,torrent_hash,previous_label,created_at,updated_at) VALUES(?,?,?,?,?)',
                (profile_id, torrent_hash, previous_label, now, now),
            )
def _restore_auto_label(client: Any, profile_id: int, torrent_hash: str, current_label: str | None = None) -> bool:
    """Put back the label a torrent had before Smart Queue paused it.

    The label is only rewritten when the torrent still carries the Smart Queue
    marker (or the caller does not know the current label); the bookkeeping row
    is deleted afterwards. Returns True when the restore happened.

    NOTE(review): the DELETE appears to reuse the connection from the
    surrounding ``with connect()`` block — confirm the original indentation
    kept all statements inside that context so the delete is committed.
    """
    with connect() as conn:
        row = conn.execute(
            'SELECT previous_label FROM smart_queue_auto_labels WHERE profile_id=? AND torrent_hash=?',
            (profile_id, torrent_hash),
        ).fetchone()
        if not row:
            return False
        previous = row.get('previous_label') or ''
        try:
            if current_label is None or current_label == SMART_QUEUE_LABEL:
                client.call('d.custom1.set', torrent_hash, previous)
            conn.execute('DELETE FROM smart_queue_auto_labels WHERE profile_id=? AND torrent_hash=?', (profile_id, torrent_hash))
            return True
        except Exception:
            return False


def _set_smart_queue_label(client: Any, torrent_hash: str, attempts: int = 3) -> bool:
    """Write the Smart Queue marker label, retrying briefly on SCGI hiccups."""
    for attempt in range(max(1, attempts)):
        try:
            client.call('d.custom1.set', torrent_hash, SMART_QUEUE_LABEL)
            return True
        except Exception:
            if attempt < attempts - 1:
                time.sleep(0.05)
    return False


def _mark_auto_paused(client: Any, profile_id: int, torrent: dict[str, Any]) -> bool:
    """Remember the torrent's current label and replace it with the marker label."""
    torrent_hash = str(torrent.get('hash') or '')
    if not torrent_hash:
        return False
    previous = str(torrent.get('label') or '')
    if previous != SMART_QUEUE_LABEL:
        _remember_auto_label(profile_id, torrent_hash, previous)
    return _set_smart_queue_label(client, torrent_hash)


def _cleanup_auto_labels(client: Any, profile_id: int, torrents: list[dict[str, Any]], keep_hashes: set[str]) -> list[str]:
    """Reconcile stored auto-labels with reality; return hashes whose label was restored.

    For each bookkeeping row not in *keep_hashes*: deleted/completed torrents
    get their original label back; torrents that are still paused/stopped get
    the marker label re-applied if it was lost; running torrents get their
    original label restored.
    """
    by_hash = {str(t.get('hash') or ''): t for t in torrents}
    restored: list[str] = []
    with connect() as conn:
        rows = conn.execute('SELECT torrent_hash FROM smart_queue_auto_labels WHERE profile_id=?', (profile_id,)).fetchall()
    for row in rows:
        h = str(row.get('torrent_hash') or '')
        t = by_hash.get(h)
        if not h or h in keep_hashes:
            continue
        if t is None or int(t.get('complete') or 0):
            # Gone or finished: the marker no longer applies.
            if _restore_auto_label(client, profile_id, h, None if t is None else str(t.get('label') or '')):
                restored.append(h)
            continue
        is_paused_or_stopped = bool(t.get('paused')) or not int(t.get('active') or 0) or not int(t.get('state') or 0)
        current_label = str(t.get('label') or '')
        if is_paused_or_stopped:
            if current_label != SMART_QUEUE_LABEL:
                _set_smart_queue_label(client, h)
            continue
        if _restore_auto_label(client, profile_id, h, current_label):
            restored.append(h)
    return restored


def check(profile: dict | None = None, user_id: int | None = None, force: bool = False) -> dict[str, Any]:
    """Run one Smart Queue pass: pause stalled downloads, resume better candidates.

    The hard ``max_active_downloads`` cap is enforced first; stalled torrents
    can additionally be rotated out one-for-one with stronger stopped
    candidates. Excluded torrents are never touched. Every run (including
    skips) is written to the history table.
    """
    profile = profile or active_profile()
    if not profile:
        return {'ok': False, 'error': 'No active rTorrent profile'}
    user_id = user_id or default_user_id()
    profile_id = int(profile['id'])
    settings = get_settings(profile_id, user_id)
    if not force and not int(settings.get('enabled') or 0):
        add_history(profile_id, 'skipped_disabled', [], [], 0, {'enabled': False}, user_id)
        return {'ok': True, 'enabled': False, 'paused': [], 'resumed': [], 'message': 'Smart Queue disabled'}

    torrents = rtorrent.list_torrents(profile)
    excluded = _excluded_hashes(profile_id, user_id)
    # Incomplete + started + not paused = actively downloading; the rest of the
    # incomplete set is eligible to be resumed.
    downloading = [t for t in torrents if not int(t.get('complete') or 0) and int(t.get('state') or 0) and not t.get('paused') and t.get('hash') not in excluded]
    stopped = [t for t in torrents if not int(t.get('complete') or 0) and (not int(t.get('state') or 0) or t.get('paused')) and t.get('hash') not in excluded]
    min_speed = int(settings.get('min_speed_bytes') or 0)
    min_seeds = int(settings.get('min_seeds') or 0)
    stalled_seconds = int(settings.get('stalled_seconds') or 300)
    now = utcnow()
    now_ts = datetime.now(timezone.utc).timestamp()
    stalled: list[dict[str, Any]] = []

    # Track how long each download has been below the speed/seed thresholds;
    # only torrents stalled for at least stalled_seconds are acted upon.
    with connect() as conn:
        for t in downloading:
            is_stalled = int(t.get('down_rate') or 0) <= min_speed and int(t.get('seeds') or 0) <= min_seeds
            h = t.get('hash')
            if not h:
                continue
            if is_stalled:
                row = conn.execute('SELECT first_stalled_at FROM smart_queue_stalled WHERE profile_id=? AND torrent_hash=?', (profile_id, h)).fetchone()
                if row:
                    conn.execute('UPDATE smart_queue_stalled SET updated_at=? WHERE profile_id=? AND torrent_hash=?', (now, profile_id, h))
                    first = row['first_stalled_at']
                else:
                    first = now
                    conn.execute('INSERT OR REPLACE INTO smart_queue_stalled(profile_id,torrent_hash,first_stalled_at,updated_at) VALUES(?,?,?,?)', (profile_id, h, first, now))
                if now_ts - _ts(first) >= stalled_seconds:
                    stalled.append(t)
            else:
                conn.execute('DELETE FROM smart_queue_stalled WHERE profile_id=? AND torrent_hash=?', (profile_id, h))

    # Candidates with visible sources are preferred. Do not touch excluded torrents.
    candidates = sorted(
        stopped,
        key=lambda t: (int(t.get('seeds') or 0), int(t.get('peers') or 0), int(t.get('down_rate') or 0)),
        reverse=True,
    )
    max_active = max(1, int(settings.get('max_active_downloads') or 5))
    stalled_hashes = {str(t.get('hash') or '') for t in stalled}

    # Enforce the hard active-download cap first. The previous logic only limited
    # newly resumed torrents, so already-active downloads could stay above the limit.
    pause_rank = sorted(
        downloading,
        key=lambda t: (
            0 if str(t.get('hash') or '') in stalled_hashes else 1,
            int(t.get('down_rate') or 0),
            int(t.get('seeds') or 0),
            int(t.get('peers') or 0),
        ),
    )
    to_pause: list[dict[str, Any]] = pause_rank[:max(0, len(downloading) - max_active)]
    pause_hashes = {str(t.get('hash') or '') for t in to_pause}

    # When the cap is not exceeded, stalled downloads can still be rotated out
    # one-for-one with better stopped candidates while staying within max_active.
    if candidates:
        replaceable_stalled = [t for t in stalled if str(t.get('hash') or '') not in pause_hashes]
        for t in replaceable_stalled[:max(0, len(candidates) - len(to_pause))]:
            to_pause.append(t)
            pause_hashes.add(str(t.get('hash') or ''))

    active_after_pause = max(0, len(downloading) - len(to_pause))
    available_slots = max(0, max_active - active_after_pause)
    to_resume = candidates[:available_slots]

    c = rtorrent.client_for(profile)
    paused: list[str] = []
    resumed: list[str] = []
    label_failed: list[str] = []
    for t in to_pause:
        try:
            c.call('d.pause', t['hash'])
            if not _mark_auto_paused(c, profile_id, t):
                label_failed.append(t['hash'])
            paused.append(t['hash'])
        except Exception:
            # Best-effort: a failed pause is simply not counted.
            pass
    for t in to_resume:
        try:
            _restore_auto_label(c, profile_id, t['hash'], str(t.get('label') or ''))
            c.call('d.resume', t['hash'])
            c.call('d.start', t['hash'])
            resumed.append(t['hash'])
        except Exception:
            pass
    restored = _cleanup_auto_labels(c, profile_id, torrents, set(paused))
    add_history(profile_id, 'force_check' if force else 'auto_check', paused, resumed, len(torrents), {'excluded': len(excluded), 'enabled': bool(settings.get('enabled')), 'auto_label': SMART_QUEUE_LABEL, 'labels_restored': restored, 'labels_failed': label_failed, 'max_active_downloads': max_active, 'active_before': len(downloading), 'active_after': active_after_pause + len(resumed)}, user_id)
    return {'ok': True, 'enabled': bool(settings.get('enabled')), 'paused': paused, 'resumed': resumed, 'labels_restored': restored, 'labels_failed': label_failed, 'checked': len(torrents), 'excluded': len(excluded), 'settings': settings}
_started = False


def schedule_startup_config_apply(socketio, delay_seconds: int = 60) -> None:
    """Apply saved rTorrent UI overrides after pyTorrent has been running for a moment.

    Scheduling happens at most once per process; later calls are no-ops. The
    work runs on a SocketIO background task so startup is never blocked, and
    each profile's outcome is broadcast as an ``rtorrent_config_applied`` event.
    """
    global _started
    if _started:
        return
    _started = True

    def _apply_overrides():
        sleep(max(0, int(delay_seconds)))
        try:
            for prof in preferences.list_profiles():
                outcome = rtorrent.apply_startup_overrides(prof)
                if not outcome.get("skipped"):
                    socketio.emit("rtorrent_config_applied", {"profile_id": prof["id"], "result": outcome})
        except Exception as exc:
            socketio.emit("rtorrent_config_applied", {"ok": False, "error": str(exc)})

    socketio.start_background_task(_apply_overrides)
_VOLATILE = {"down_rate", "down_rate_h", "up_rate", "up_rate_h", "progress", "completed_bytes", "peers", "seeds", "ratio", "state", "status", "message", "down_total", "down_total_h", "up_total", "up_total_h"}


class TorrentCache:
    """In-memory per-profile torrent table used to diff successive rTorrent polls.

    All access is serialized with one re-entrant lock so the SocketIO poller
    and request handlers can share the cache safely.
    """

    def __init__(self):
        self._lock = RLock()
        self._data: dict[int, dict[str, dict]] = {}
        self._errors: dict[int, str] = {}
        self._updated_at: dict[int, float] = {}

    def snapshot(self, profile_id: int) -> list[dict]:
        """Return the cached torrent rows for *profile_id* (empty list if unknown)."""
        with self._lock:
            return list(self._data.get(profile_id, {}).values())

    def error(self, profile_id: int) -> str:
        """Return the last refresh error for *profile_id*, or '' when healthy."""
        with self._lock:
            return self._errors.get(profile_id, "")

    def refresh(self, profile: dict) -> dict:
        """Poll rTorrent and compute an added/updated/removed patch versus the cache."""
        profile_id = int(profile["id"])
        try:
            fresh = {row["hash"]: row for row in rtorrent.list_torrents(profile)}
        except Exception as exc:
            with self._lock:
                self._errors[profile_id] = str(exc)
            return {"ok": False, "profile_id": profile_id, "error": str(exc), "added": [], "updated": [], "removed": []}
        with self._lock:
            previous = self._data.get(profile_id, {})
            added = [row for torrent_hash, row in fresh.items() if torrent_hash not in previous]
            removed = [torrent_hash for torrent_hash in previous if torrent_hash not in fresh]
            updated = []
            for torrent_hash, row in fresh.items():
                before = previous.get(torrent_hash)
                if not before:
                    continue
                # A patch carries the hash plus only the fields that changed.
                patch = {"hash": torrent_hash}
                for field, value in row.items():
                    if before.get(field) != value:
                        patch[field] = value
                if len(patch) > 1:
                    updated.append(patch)
            self._data[profile_id] = fresh
            self._errors[profile_id] = ""
            self._updated_at[profile_id] = time()
        return {"ok": True, "profile_id": profile_id, "added": added, "updated": updated, "removed": removed}


torrent_cache = TorrentCache()
time + +SUMMARY_CACHE_TTL_SECONDS = 60 + +_ERROR_PATTERNS = ( + "error", + "failed", + "failure", + "timeout", + "timed out", + "tracker", + "could not", + "cannot", + "refused", + "unreachable", + "denied", +) +_SUMMARY_TYPES = ("all", "downloading", "seeding", "paused", "checking", "error", "stopped") +_summary_cache: dict[int, dict] = {} +_summary_lock = RLock() + + +def _number(row: dict, key: str) -> int: + try: + return int(float(row.get(key) or 0)) + except (TypeError, ValueError): + return 0 + + +def _has_error(row: dict) -> bool: + message = str(row.get("message") or "").strip().lower() + return bool(message and any(pattern in message for pattern in _ERROR_PATTERNS)) + + +def _matches(row: dict, summary_type: str) -> bool: + status = str(row.get("status") or "") + if summary_type == "all": + return True + if summary_type == "downloading": + return not bool(row.get("complete")) and bool(row.get("state")) and not bool(row.get("paused")) + if summary_type == "seeding": + return status != "Checking" and bool(row.get("complete")) and bool(row.get("state")) and not bool(row.get("paused")) + if summary_type == "paused": + return bool(row.get("paused")) or status == "Paused" + if summary_type == "checking": + return status == "Checking" or _number(row, "hashing") > 0 + if summary_type == "error": + return _has_error(row) + if summary_type == "stopped": + return not bool(row.get("state")) + return False + + +def _empty_bucket() -> dict: + return { + "count": 0, + "size": 0, + "disk_bytes": 0, + "completed_bytes": 0, + "remaining_bytes": 0, + "progress_percent": 0.0, + "remaining_percent": 100.0, + # Kept for backward compatibility with older clients; not used by the filters UI. 
+ "down_total": 0, + "up_total": 0, + } + + +def build_summary(rows: list[dict]) -> dict: + filters = {summary_type: _empty_bucket() for summary_type in _SUMMARY_TYPES} + for row in rows: + for summary_type in _SUMMARY_TYPES: + if not _matches(row, summary_type): + continue + bucket = filters[summary_type] + bucket["count"] += 1 + size = _number(row, "size") + completed = min(size, _number(row, "completed_bytes")) if size else _number(row, "completed_bytes") + bucket["size"] += size + bucket["completed_bytes"] += completed + bucket["disk_bytes"] += completed + bucket["down_total"] += _number(row, "down_total") + bucket["up_total"] += _number(row, "up_total") + for bucket in filters.values(): + bucket["remaining_bytes"] = max(0, bucket["size"] - bucket["completed_bytes"]) + if bucket["size"] > 0: + bucket["progress_percent"] = round((bucket["completed_bytes"] / bucket["size"]) * 100, 1) + bucket["remaining_percent"] = round(100 - bucket["progress_percent"], 1) + else: + bucket["progress_percent"] = 0.0 + bucket["remaining_percent"] = 0.0 + now = time() + return { + "filters": filters, + "cache_ttl_seconds": SUMMARY_CACHE_TTL_SECONDS, + "generated_at_epoch": now, + "cached": False, + } + + +def cached_summary(profile_id: int, rows: list[dict], force: bool = False) -> dict: + now = time() + with _summary_lock: + cached = _summary_cache.get(int(profile_id)) + rows_count = len(rows or []) + cached_count = int(((cached or {}).get("filters") or {}).get("all", {}).get("count") or 0) + cache_is_fresh = cached and now - float(cached.get("generated_at_epoch") or 0) < SUMMARY_CACHE_TTL_SECONDS + cache_is_usable = cache_is_fresh and not (cached_count == 0 and rows_count > 0) + if not force and cache_is_usable: + result = deepcopy(cached) + result["cached"] = True + return result + result = build_summary(rows or []) + # Do not cache an empty cold-start snapshot. 
def invalidate_summary(profile_id: int | None = None) -> None:
    """Drop the cached summary for one profile, or for all profiles when None."""
    with _summary_lock:
        if profile_id is None:
            _summary_cache.clear()
        else:
            _summary_cache.pop(int(profile_id), None)


# --- pytorrent/services/traffic_history.py ---

from __future__ import annotations

from datetime import datetime, timedelta, timezone
from typing import Any

from ..config import TRAFFIC_HISTORY_RETENTION_DAYS
from ..db import connect, utcnow
from . import retention

# Per-profile epoch timestamp of the last stored sample.
_LAST_WRITE: dict[int, float] = {}
# Throttle: at most one traffic sample per profile per minute.
WRITE_EVERY_SECONDS = 60


def _now_ts() -> float:
    """Current UTC time as epoch seconds."""
    return datetime.now(timezone.utc).timestamp()
def record(profile_id: int, down_rate: int = 0, up_rate: int = 0, total_down: int = 0, total_up: int = 0, force: bool = False) -> None:
    """Store compact transfer samples. One sample per minute per profile keeps SQLite small."""
    key = int(profile_id)
    stamp = _now_ts()
    # Throttle writes unless the caller explicitly forces a sample.
    if not force and stamp - _LAST_WRITE.get(key, 0.0) < WRITE_EVERY_SECONDS:
        return
    _LAST_WRITE[key] = stamp
    with connect() as conn:
        conn.execute(
            "INSERT INTO traffic_history(profile_id,down_rate,up_rate,total_down,total_up,created_at) VALUES(?,?,?,?,?,?)",
            (key, int(down_rate or 0), int(up_rate or 0), int(total_down or 0), int(total_up or 0), utcnow()),
        )
    retention.cleanup()


# Window length for each UI range token; unknown tokens fall back to 7 days.
_RANGE_WINDOWS = {
    "15m": timedelta(minutes=15),
    "1h": timedelta(hours=1),
    "3h": timedelta(hours=3),
    "6h": timedelta(hours=6),
    "24h": timedelta(hours=24),
    "30d": timedelta(days=30),
    "90d": timedelta(days=90),
}


def _range_to_cutoff(range_name: str) -> datetime:
    """Translate a UI range token into an absolute UTC cutoff (default: 7 days back)."""
    window = _RANGE_WINDOWS.get(range_name, timedelta(days=7))
    return datetime.now(timezone.utc) - window


def _bucket_for(range_name: str) -> str:
    """strftime pattern used to group samples: minutes, hours or days by range width."""
    if range_name in {"15m", "1h", "3h"}:
        return "%Y-%m-%d %H:%M"
    return "%Y-%m-%d %H:00" if range_name in {"6h", "24h"} else "%Y-%m-%d"
def _row_value(row: Any, key: str, index: int, default: Any = 0) -> Any:
    """Read a column from a SQLite row by name, with a positional fallback."""
    # connect() uses dict_factory, so SQLite rows are dicts. The fallback keeps
    # this function compatible with tuple/list rows in tests or future refactors.
    if isinstance(row, dict):
        return row.get(key, default)
    try:
        return row[index]
    except (IndexError, KeyError, TypeError):
        return default


def history(profile_id: int, range_name: str = "7d") -> dict[str, Any]:
    """Aggregate stored traffic samples into time buckets for charting.

    Rates are averaged per bucket; downloaded/uploaded volumes are derived from
    consecutive total-counter deltas (negative deltas — e.g. after an rTorrent
    restart resets the counters — are skipped). The very first sample of a
    series contributes no volume because there is no previous total to diff
    against. NOTE(review): the previous-total state spans bucket boundaries,
    so a delta is attributed to the bucket of the *later* sample.
    """
    cutoff = _range_to_cutoff(range_name)
    bucket = _bucket_for(range_name)
    cutoff_s = cutoff.isoformat(timespec="seconds")
    bucket_name = "minute" if range_name in {"15m", "1h", "3h"} else ("hour" if range_name in {"6h", "24h"} else "day")
    with connect() as conn:
        raw = conn.execute(
            """
            SELECT down_rate, up_rate, total_down, total_up, created_at
            FROM traffic_history
            WHERE profile_id=? AND created_at >= ?
            ORDER BY created_at ASC
            """,
            (int(profile_id), cutoff_s),
        ).fetchall()

    rows_by_bucket: dict[str, dict[str, Any]] = {}
    prev_down = prev_up = None
    for r in raw:
        created = str(_row_value(r, "created_at", 4, ""))
        try:
            dt = datetime.fromisoformat(created.replace("Z", "+00:00"))
        except Exception:
            # Unparseable timestamps are dropped rather than corrupting a bucket.
            continue
        b = dt.strftime(bucket)
        item = rows_by_bucket.setdefault(b, {"bucket": b, "avg_down_rate": 0, "avg_up_rate": 0, "downloaded": 0, "uploaded": 0, "samples": 0})
        down_rate = int(_row_value(r, "down_rate", 0, 0) or 0)
        up_rate = int(_row_value(r, "up_rate", 1, 0) or 0)
        total_down = int(_row_value(r, "total_down", 2, 0) or 0)
        total_up = int(_row_value(r, "total_up", 3, 0) or 0)
        # Accumulate sums now; they are divided by the sample count below.
        item["avg_down_rate"] += down_rate
        item["avg_up_rate"] += up_rate
        item["samples"] += 1
        if prev_down is not None and total_down >= prev_down:
            item["downloaded"] += total_down - prev_down
        if prev_up is not None and total_up >= prev_up:
            item["uploaded"] += total_up - prev_up
        prev_down, prev_up = total_down, total_up

    rows = []
    for item in rows_by_bucket.values():
        samples = max(1, int(item["samples"] or 1))
        item["avg_down_rate"] = round(item["avg_down_rate"] / samples)
        item["avg_up_rate"] = round(item["avg_up_rate"] / samples)
        rows.append(item)
    rows.sort(key=lambda x: x["bucket"])
    return {"range": range_name, "bucket": bucket_name, "retention_days": TRAFFIC_HISTORY_RETENTION_DAYS, "rows": rows}
_started = False


def register_socketio_handlers(socketio):
    """Register SocketIO handlers and the background poller.

    The poller pushes torrent patches, system stats, Smart Queue and
    automation results to all clients every POLL_INTERVAL seconds; it is
    started lazily on the first client connection.

    Fix: removed the dead ``global _started`` declaration at the top of this
    function — it never assigns ``_started`` here, and ``handle_connect``
    declares its own ``global`` where the assignment actually happens.
    """

    def poller():
        tick = 0
        while True:
            profile = active_profile()
            if profile:
                diff = torrent_cache.refresh(profile)
                heartbeat = {"ok": bool(diff.get("ok")), "profile_id": profile["id"], "tick": tick, "error": diff.get("error", "")}
                if diff.get("ok") and (diff["added"] or diff["updated"] or diff["removed"]):
                    # Only broadcast when something actually changed.
                    socketio.emit("torrent_patch", {**diff, "summary": cached_summary(profile["id"], torrent_cache.snapshot(profile["id"]), force=True)})
                elif not diff.get("ok"):
                    socketio.emit("rtorrent_error", diff)
                try:
                    status = rtorrent.system_status(profile)
                    if bool(profile.get("is_remote")):
                        # Host CPU/RAM would be misleading for a remote rTorrent.
                        status["usage_source"] = "remote-hidden"
                        status["usage_available"] = False
                    else:
                        status["cpu"] = psutil.cpu_percent(interval=None)
                        status["ram"] = psutil.virtual_memory().percent
                        status["usage_source"] = "local"
                        status["usage_available"] = True
                    status["profile_id"] = profile["id"]
                    traffic_history.record(profile["id"], status.get("down_rate", 0), status.get("up_rate", 0), status.get("total_down", 0), status.get("total_up", 0))
                    socketio.emit("system_stats", status)
                    heartbeat["ok"] = True
                except Exception as exc:
                    heartbeat["ok"] = False
                    heartbeat["error"] = str(exc)
                    socketio.emit("rtorrent_error", {"profile_id": profile["id"], "error": str(exc)})
                if tick % max(1, int(30 / POLL_INTERVAL)) == 0:
                    # Smart Queue and automation run on a coarser ~30 s cadence.
                    try:
                        result = smart_queue.check(profile, force=False)
                        if result.get("enabled"):
                            socketio.emit("smart_queue_update", result)
                    except Exception as exc:
                        socketio.emit("smart_queue_update", {"ok": False, "error": str(exc)})
                    try:
                        auto_result = automation_rules.check(profile, force=False)
                        if auto_result.get("applied"):
                            socketio.emit("automation_update", auto_result)
                    except Exception as exc:
                        socketio.emit("automation_update", {"ok": False, "error": str(exc)})
                socketio.emit("heartbeat", heartbeat)
            tick += 1
            socketio.sleep(POLL_INTERVAL)

    @socketio.on("connect")
    def handle_connect():
        global _started
        if not _started:
            # Lazy start: the poller spins up with the first client.
            socketio.start_background_task(poller)
            _started = True
        profile = active_profile()
        emit("connected", {"ok": True, "profile": profile})
        if profile:
            rows = torrent_cache.snapshot(profile["id"])
            emit("torrent_snapshot", {"profile_id": profile["id"], "torrents": rows, "summary": cached_summary(profile["id"], rows)})

    @socketio.on("select_profile")
    def handle_select_profile(data):
        profile_id = int((data or {}).get("profile_id") or 0)
        profile = get_profile(profile_id)
        if not profile:
            emit("rtorrent_error", {"error": "Profile does not exist"})
            return
        diff = torrent_cache.refresh(profile)
        rows = torrent_cache.snapshot(profile_id)
        emit("torrent_snapshot", {"profile_id": profile_id, "torrents": rows, "summary": cached_summary(profile_id, rows, force=True), "error": diff.get("error", "")})
import rtorrent +from .preferences import get_profile +from ..config import WORKERS +from ..db import connect, utcnow, default_user_id + +_executor = ThreadPoolExecutor(max_workers=WORKERS, thread_name_prefix="pytorrent-job") +_socketio = None +_semaphores: dict[int, threading.Semaphore] = {} +_exclusive_locks: dict[int, threading.Lock] = {} +_sem_lock = threading.Lock() + + +def set_socketio(socketio): + global _socketio + _socketio = socketio + + +def _emit(name: str, payload: dict): + if _socketio: + _socketio.emit(name, payload) + + +def _get_sem(profile: dict) -> threading.Semaphore: + profile_id = int(profile["id"]) + max_parallel = max(1, int(profile.get("max_parallel_jobs") or 3)) + with _sem_lock: + if profile_id not in _semaphores: + _semaphores[profile_id] = threading.Semaphore(max_parallel) + return _semaphores[profile_id] + + +def _get_exclusive_lock(profile_id: int) -> threading.Lock: + with _sem_lock: + if profile_id not in _exclusive_locks: + _exclusive_locks[profile_id] = threading.Lock() + return _exclusive_locks[profile_id] + + +def _job_row(job_id: str): + with connect() as conn: + return conn.execute("SELECT rowid AS _rowid, * FROM jobs WHERE id=?", (job_id,)).fetchone() + + +def _is_ordered_action(action_name: str) -> bool: + return action_name in {"move", "remove"} + + +def _has_prior_ordered_jobs(profile_id: int, rowid: int) -> bool: + with connect() as conn: + row = conn.execute( + """ + SELECT 1 + FROM jobs + WHERE profile_id=? 
+ AND rowid bool: + while _has_prior_ordered_jobs(profile_id, rowid): + fresh = _job_row(job_id) + if not fresh or fresh["status"] == "cancelled": + return False + time.sleep(0.5) + return True + + +def _set_job(job_id: str, status: str, error: str = "", result: dict | None = None, started: bool = False, finished: bool = False): + now = utcnow() + fields = ["status=?", "error=?", "updated_at=?"] + values: list = [status, error, now] + if result is not None: + fields.append("result_json=?") + values.append(json.dumps(result)) + if started: + fields.append("started_at=?") + values.append(now) + if finished: + fields.append("finished_at=?") + values.append(now) + values.append(job_id) + with connect() as conn: + conn.execute(f"UPDATE jobs SET {', '.join(fields)} WHERE id=?", values) + + +def enqueue(action_name: str, profile_id: int, payload: dict, user_id: int | None = None, max_attempts: int = 2) -> str: + user_id = user_id or default_user_id() + job_id = uuid.uuid4().hex + now = utcnow() + with connect() as conn: + conn.execute( + "INSERT INTO jobs(id,user_id,profile_id,action,payload_json,status,attempts,max_attempts,created_at,updated_at) VALUES(?,?,?,?,?,?,?,?,?,?)", + (job_id, user_id, profile_id, action_name, json.dumps(payload), "pending", 0, max_attempts, now, now), + ) + _emit("job_update", {"id": job_id, "action": action_name, "profile_id": profile_id, "status": "pending"}) + _executor.submit(_run, job_id) + return job_id + + +def _execute(profile: dict, action_name: str, payload: dict): + if action_name == "add_magnet": + return rtorrent.add_magnet(profile, payload["uri"], bool(payload.get("start", True)), str(payload.get("directory") or ""), str(payload.get("label") or "")) + if action_name == "add_torrent_raw": + import base64 + raw = base64.b64decode(payload["data_b64"]) + return rtorrent.add_torrent_raw(profile, raw, bool(payload.get("start", True)), str(payload.get("directory") or ""), str(payload.get("label") or "")) + if action_name == 
"set_limits": + return rtorrent.set_limits(profile, payload.get("down"), payload.get("up")) + hashes = payload.get("hashes") or [] + return rtorrent.action(profile, hashes, action_name, payload) + + +def _run(job_id: str): + job = _job_row(job_id) + if not job or job["status"] == "cancelled": + return + profile = get_profile(int(job["profile_id"]), int(job["user_id"])) + if not profile: + _set_job(job_id, "failed", "rTorrent profile does not exist", finished=True) + _emit("job_update", {"id": job_id, "status": "failed", "error": "profile not found"}) + return + profile_id = int(profile["id"]) + ordered_lock = None + if _is_ordered_action(str(job["action"])): + if not _wait_for_prior_ordered_jobs(job_id, profile_id, int(job["_rowid"])): + return + ordered_lock = _get_exclusive_lock(profile_id) + ordered_lock.acquire() + sem = _get_sem(profile) + sem.acquire() + try: + job = _job_row(job_id) + if not job or job["status"] == "cancelled": + return + payload = json.loads(job.get("payload_json") or "{}") + attempts = int(job.get("attempts") or 0) + 1 + with connect() as conn: + conn.execute("UPDATE jobs SET status='running', attempts=?, started_at=COALESCE(started_at, ?), updated_at=? 
WHERE id=?", (attempts, utcnow(), utcnow(), job_id)) + _emit("operation_started", {"job_id": job_id, "action": job["action"], "profile_id": profile["id"], "hashes": payload.get("hashes") or [], "hash_count": len(payload.get("hashes") or []), "bulk": len(payload.get("hashes") or []) > 1}) + _emit("job_update", {"id": job_id, "status": "running", "attempts": attempts}) + result = _execute(profile, job["action"], payload) + _set_job(job_id, "done", result=result, finished=True) + _emit("operation_finished", {"job_id": job_id, "action": job["action"], "profile_id": profile["id"], "hashes": payload.get("hashes") or [], "hash_count": len(payload.get("hashes") or []), "bulk": len(payload.get("hashes") or []) > 1, "result": result}) + _emit("job_update", {"id": job_id, "status": "done", "result": result}) + except Exception as exc: + fresh = _job_row(job_id) or {} + attempts = int(fresh.get("attempts") or 1) + max_attempts = int(fresh.get("max_attempts") or 2) + status = "pending" if attempts < max_attempts else "failed" + _set_job(job_id, status, str(exc), finished=(status == "failed")) + _emit("operation_failed", {"job_id": job_id, "action": job.get("action"), "profile_id": job.get("profile_id"), "hashes": payload.get("hashes") or [], "error": str(exc)}) + _emit("job_update", {"id": job_id, "status": status, "error": str(exc), "attempts": attempts}) + if status == "pending": + _executor.submit(_run, job_id) + finally: + sem.release() + if ordered_lock: + ordered_lock.release() + + +def _safe_json(value, fallback): + try: + return json.loads(value or "") + except Exception: + return fallback + + +def _job_summary(row: dict, payload: dict, result: dict) -> str: + ctx = payload.get("job_context") or {} + count = int(ctx.get("hash_count") or len(payload.get("hashes") or []) or result.get("count") or 0) + parts = [] + if count: + parts.append(("bulk " if count > 1 else "single ") + f"{count} torrent(s)") + if ctx.get("target_path"): + parts.append(f"target: 
{ctx.get('target_path')}") + if ctx.get("remove_data"): + parts.append("remove data") + if ctx.get("move_data"): + parts.append("move data") + if result.get("count") is not None: + parts.append(f"done: {result.get('count')}") + if result.get("errors"): + parts.append(f"errors: {len(result.get('errors') or [])}") + return "; ".join(parts) + + +def _public_job(row) -> dict: + d = dict(row) + payload = _safe_json(d.get("payload_json"), {}) + result = _safe_json(d.get("result_json"), {}) + ctx = payload.get("job_context") or {} + d["payload"] = payload + d["result"] = result + d["hash_count"] = int(ctx.get("hash_count") or len(payload.get("hashes") or []) or result.get("count") or 0) + d["is_bulk"] = bool(ctx.get("bulk") or d["hash_count"] > 1) + d["summary"] = _job_summary(d, payload, result) + items = ctx.get("items") or [] + if d["is_bulk"]: + d["items_preview"] = "" + else: + d["items_preview"] = ", ".join([str((x or {}).get("name") or (x or {}).get("hash") or "") for x in items[:1] if x]) + return d + + +def list_jobs(limit: int = 200, offset: int = 0): + limit = max(1, min(int(limit or 50), 500)) + offset = max(0, int(offset or 0)) + with connect() as conn: + rows = conn.execute("SELECT * FROM jobs ORDER BY created_at DESC LIMIT ? 
OFFSET ?", (limit, offset)).fetchall() + total = conn.execute("SELECT COUNT(*) AS n FROM jobs").fetchone()["n"] + return {"rows": [_public_job(r) for r in rows], "total": total, "limit": limit, "offset": offset} + + +def cancel_job(job_id: str) -> bool: + row = _job_row(job_id) + if not row or row["status"] not in {"pending", "failed"}: + return False + _set_job(job_id, "cancelled", finished=True) + _emit("job_update", {"id": job_id, "status": "cancelled"}) + return True + + +def clear_jobs() -> int: + with connect() as conn: + cur = conn.execute("DELETE FROM jobs WHERE status NOT IN ('pending', 'running')") + return int(cur.rowcount or 0) + + +def retry_job(job_id: str) -> bool: + row = _job_row(job_id) + if not row or row["status"] not in {"failed", "cancelled"}: + return False + with connect() as conn: + conn.execute("UPDATE jobs SET status='pending', error='', finished_at=NULL, updated_at=? WHERE id=?", (utcnow(), job_id)) + _emit("job_update", {"id": job_id, "status": "pending"}) + _executor.submit(_run, job_id) + return True diff --git a/pytorrent/static/app.js b/pytorrent/static/app.js new file mode 100644 index 0000000..9fb0b30 --- /dev/null +++ b/pytorrent/static/app.js @@ -0,0 +1,640 @@ +(() => { + const $ = (id) => document.getElementById(id); + const esc = (s) => String(s ?? 
"").replace(/[&<>'"]/g, c => ({"&":"&","<":"<",">":">","'":"'",'"':"""}[c])); + const ROW_HEIGHT = 34, OVERSCAN = 14; + const torrents = new Map(); + let visibleRows = [], selected = new Set(), selectedHash = null, lastSelectedHash = null, activeFilter = "all"; + let sortState = {key: "name", dir: 1}, renderPending = false, renderVersion = 0, lastRenderSignature = ""; + let lastLimits = {down: 0, up: 0}, pendingBusy = 0, pathTarget = null, lastPathParent = "/"; + const traffic = [], systemUsage = []; + const socket = io({transports:["polling"], reconnection:true, reconnectionAttempts:Infinity, reconnectionDelay:700, reconnectionDelayMax:5000, timeout:8000}); + const COLUMN_DEFS = [["status","Status"],["size","Size"],["progress","Progress"],["down_rate","DL"],["up_rate","UL"],["seeds","Seeds"],["peers","Peers"],["ratio","Ratio"],["path","Path"],["label","Label"],["ratio_group","Ratio group"]]; + let hiddenColumns = new Set((window.PYTORRENT?.tableColumns?.hidden || [])); + let knownLabels = []; + let jobsPage = 0, jobsLimit = 25, jobsTotal = 0, smartHistoryExpanded = false; + let peersRefreshTimer = null; + let peersRefreshSeconds = Number(window.PYTORRENT?.peersRefreshSeconds || 0); + let portCheckEnabled = !!Number(window.PYTORRENT?.portCheckEnabled || 0); + let bootstrapTheme = window.PYTORRENT?.bootstrapTheme || "default"; + let fontFamily = window.PYTORRENT?.fontFamily || "default"; + let modalLabels = new Set(), defaultDownloadPath = null; + let hasTorrentSnapshot = false, initialLoaderDone = false, rtConfigOriginal = new Map(), rtConfigFieldTypes = new Map(), rtConfigOriginalApplyOnStart = false; + let torrentSummary = null; + let profileCache = new Map(); + const activeOperations = new Map(); + + function toast(msg, type="secondary") { const h=$('toastHost'); if(!h) return; const el=document.createElement('div'); el.className=`toast-item text-bg-${type}`; el.innerHTML=esc(msg); h.appendChild(el); setTimeout(()=>el.remove(),3500); } + function setBusy(on){ 
pendingBusy += on ? 1 : -1; if(pendingBusy<0) pendingBusy=0; $('globalLoader')?.classList.toggle('d-none', pendingBusy===0); $('busyBadge')?.classList.toggle('d-none', pendingBusy===0); } + function setInitialLoader(title, text){ if(initialLoaderDone) return; if($('initialLoaderTitle') && title) $('initialLoaderTitle').textContent=title; if($('initialLoaderText') && text) $('initialLoaderText').textContent=text; } + function hideInitialLoader(){ if(initialLoaderDone) return; initialLoaderDone=true; $('initialLoader')?.classList.add('is-hidden'); } + function buttonBusy(btn,on){ if(!btn)return; btn.disabled=on; const label=btn.querySelector('.btn-label'); if(label){ if(!label.dataset.orig) label.dataset.orig=label.innerHTML; label.innerHTML=on?`Working...`:label.dataset.orig; }} + function activeTab(){ return document.querySelector('#detailTabs .nav-link.active')?.dataset.tab || 'general'; } + function loadingMarkup(label='Loading data...'){ return `
${esc(label)}
`; } + function loadingTableRow(label='Loading torrents...'){ return `${loadingMarkup(label)}`; } + function parseDate(value){ const raw=String(value||'').trim(); if(!raw) return null; const d=new Date(raw); return Number.isNaN(d.getTime()) ? null : {raw,d}; } + function formatDate(value, mode='short'){ + const parsed=parseDate(value); + if(!parsed) return String(value||''); + const opts=mode==='full' + ? {year:'numeric',month:'2-digit',day:'2-digit',hour:'2-digit',minute:'2-digit',second:'2-digit'} + : {month:'2-digit',day:'2-digit',hour:'2-digit',minute:'2-digit'}; + return new Intl.DateTimeFormat('pl-PL', opts).format(parsed.d).replace(',', ''); + } + function dateCell(value){ const parsed=parseDate(value); if(!parsed) return esc(value||''); return `${esc(formatDate(value))}`; } + function compactCell(value, max=120){ const text=String(value||""); if(!text) return ""; const short=text.length>max ? `${text.slice(0, Math.floor(max*0.62))}…${text.slice(-Math.floor(max*0.28))}` : text; return `${esc(short)}`; } + function progressBar(value, extraClass=''){ const pct=Math.max(0,Math.min(100,Number(value||0))); const hue=Math.round((pct/100)*120); const light=30+Math.round((pct/100)*5); const bg=pct<=0?'transparent':pct>=100?'var(--torrent-progress-complete)':`hsl(${hue} 52% ${light}%)`; const done=pct>=100?' is-complete':''; const cls=extraClass?` ${extraClass}`:''; return `
${esc(pct)}%
`; } + function progress(t){ return progressBar(t.progress); } + // Note: Displays status filter summaries calculated and cached by the backend API. + const FILTER_COUNT_IDS = {all:'countAll', downloading:'countDownloading', seeding:'countSeeding', paused:'countPaused', checking:'countChecking', error:'countError', stopped:'countStopped'}; + function formatFilterBytes(value){ return fmtBytes(value).replace(/\.0 (?=GiB|TiB)/, ' '); } + function filterMetaLine(bucket){ + if(!bucket || !Number(bucket.count||0)) return ''; + const disk=Number(bucket.disk_bytes ?? bucket.completed_bytes ?? 0); + return `Data ${formatFilterBytes(disk)}`; + } + function filterNeedsDownloadDetails(type, bucket){ + if(!bucket || !Number(bucket.count||0)) return false; + if(type==='downloading') return true; + if(type!=='paused' && type!=='stopped') return false; + const size=Number(bucket.size||0); + const completed=Number(bucket.completed_bytes ?? bucket.disk_bytes ?? 0); + const remaining=Number(bucket.remaining_bytes ?? Math.max(0, size-completed)); + const progress=Number(bucket.progress_percent ?? (size ? (completed / size) * 100 : 0)); + return size > 0 && remaining > 0 && progress < 100; + } + function filterTooltipLine(bucket, type){ + if(!bucket || !Number(bucket.count||0)) return ''; + const size=Number(bucket.size||0); + const disk=Number(bucket.disk_bytes ?? bucket.completed_bytes ?? 0); + const completed=Number(bucket.completed_bytes ?? disk); + const remaining=Number(bucket.remaining_bytes ?? Math.max(0, size-completed)); + const progress=Number(bucket.progress_percent ?? (size ? (completed / size) * 100 : 0)); + const left=Number(bucket.remaining_percent ?? 
Math.max(0, 100-progress)); + const lines=[`Data: ${formatFilterBytes(disk)}`]; + if(filterNeedsDownloadDetails(type, bucket)){ + lines.push(`Total to download: ${formatFilterBytes(size)}`); + lines.push(`Downloaded: ${formatFilterBytes(completed)} (${progress.toFixed(1)}%)`); + lines.push(`Left: ${formatFilterBytes(remaining)} (${left.toFixed(1)}%)`); + } + return lines.join('\n'); + } + function setFilterSummary(type){ + const el=$(FILTER_COUNT_IDS[type]); + if(!el) return; + const bucket=torrentSummary?.filters?.[type] || {count:0}; + const meta=filterMetaLine(bucket, type); + const tooltip=filterTooltipLine(bucket, type); + el.innerHTML=`${esc(bucket.count||0)}${meta?`${esc(meta)}`:''}`; + const button=el.closest('.filter'); + if(button){ + if(tooltip){ + button.title=tooltip; + button.setAttribute('aria-label', `${button.dataset.filter || type}: ${tooltip.replace(/\n/g, ', ')}`); + } else { + button.removeAttribute('title'); + button.removeAttribute('aria-label'); + } + } + } + function labelNames(value){ return String(value||'').split(/[,;|]+/).map(x=>x.trim()).filter(Boolean).filter((x,i,a)=>a.indexOf(x)===i); } + function labelValue(labels){ return [...new Set((labels||[]).map(x=>String(x||'').trim()).filter(Boolean))].join(', '); } + function rowHasLabel(t,label){ return labelNames(t.label).includes(label); } + function torrentHasError(t){ return !!torrentWarning(t); } + function rowVisible(t){ const q=($('searchBox')?.value||'').toLowerCase(); if(q && ![t.name,t.path,t.label,t.hash,t.ratio_group].join(' ').toLowerCase().includes(q)) return false; if(activeFilter==='downloading') return !t.complete && t.state && !t.paused; if(activeFilter==='seeding') return t.status!=='Checking' && t.complete && t.state && !t.paused; if(activeFilter==='paused') return !!t.paused || t.status==='Paused'; if(activeFilter==='checking') return t.status==='Checking' || Number(t.hashing||0)>0; if(activeFilter==='error') return torrentHasError(t); if(activeFilter==='stopped') 
return !t.state; if(activeFilter.startsWith('label:')) return rowHasLabel(t,activeFilter.slice(6)); return true; } + function compareRows(a,b){ const k=sortState.key; let av=a[k], bv=b[k]; if(typeof av==='string'||typeof bv==='string') return String(av||'').localeCompare(String(bv||''))*sortState.dir; return ((Number(av||0)>Number(bv||0))?1:(Number(av||0)0?" ":" "; } + function updateSortHeaders(){ document.querySelectorAll('.torrent-table thead th[data-sort]').forEach(th=>{ const base=th.dataset.baseText||th.textContent.trim(); th.dataset.baseText=base; th.innerHTML=`${esc(base)}${sortIcon(th.dataset.sort)}`; th.classList.toggle('sorted',sortState.key===th.dataset.sort); }); } + // Note: Refreshes sidebar counters from the cached API summary, not from browser-side aggregation. + function renderCounts(){ + Object.keys(FILTER_COUNT_IDS).forEach(setFilterSummary); + $('statSelected').textContent=selected.size; + } + function renderLabelFilters(){ const box=$('labelFilters'); if(!box) return; const counts=new Map(); [...torrents.values()].forEach(t=>labelNames(t.label).forEach(l=>counts.set(l,(counts.get(l)||0)+1))); const labels=[...counts.keys()].filter(l=>counts.get(l)>0).sort((a,b)=>a.localeCompare(b)); if(activeFilter.startsWith('label:') && !counts.has(activeFilter.slice(6))) activeFilter='all'; box.innerHTML=labels.length?`
Labels
${labels.map(l=>``).join('')}`:''; box.querySelectorAll('.filter').forEach(b=>b.addEventListener('click',()=>{document.querySelectorAll('.filter').forEach(x=>x.classList.remove('active')); b.classList.add('active'); activeFilter=b.dataset.filter; if($('tableWrap'))$('tableWrap').scrollTop=0; scheduleRender(true);})); } + function buildVisibleRows(){ visibleRows=[...torrents.values()].filter(rowVisible).sort(compareRows); $('statShown').textContent=visibleRows.length; } + function applyColumnVisibility(){ document.querySelectorAll('[data-col]').forEach(el=>el.classList.toggle('hidden-col', hiddenColumns.has(el.dataset.col))); } + function actionLabel(action){ + const labels={start:'Starting',pause:'Pausing',stop:'Stopping',resume:'Resuming',recheck:'Checking',reannounce:'Reannouncing',remove:'Removing',move:'Moving',set_label:'Setting label',set_ratio_group:'Setting ratio'}; + return labels[action] || `Working: ${action}`; + } + function actionIcon(action){ + return ({start:'fa-play',pause:'fa-pause',stop:'fa-stop',resume:'fa-play',recheck:'fa-rotate',reannounce:'fa-bullhorn',remove:'fa-trash',move:'fa-folder-open',set_label:'fa-tag',set_ratio_group:'fa-scale-balanced'}[action]) || 'fa-gears'; + } + function markTorrentOperation(hashes, action, jobId, state='queued'){ + const label=actionLabel(action); + [...new Set(hashes||[])].filter(Boolean).forEach(hash=>activeOperations.set(hash,{action,jobId,state,label,updatedAt:Date.now()})); + scheduleRender(true); + } + function clearJobOperation(jobId, hashes=[]){ + if(jobId){ [...activeOperations].forEach(([hash,op])=>{ if(op.jobId===jobId) activeOperations.delete(hash); }); } + (hashes||[]).forEach(hash=>activeOperations.delete(hash)); + scheduleRender(true); + } + function activeOperationFor(t){ return activeOperations.get(t.hash) || null; } + function statusMeta(t){ + const op=activeOperationFor(t); + if(op) return {cls:'text-bg-info operation-status-badge', icon:actionIcon(op.action), color:'text-info', 
label:op.label}; + const status=String(t.status||'').toLowerCase(); + if(t.paused || status==='paused') return {cls:'text-bg-warning', icon:'fa-pause', color:'text-warning'}; + if(status==='checking' || Number(t.hashing||0)>0) return {cls:'text-bg-info', icon:'fa-rotate', color:'text-info'}; + if(status==='seeding') return {cls:'text-bg-success', icon:'fa-seedling', color:'text-success'}; + if(status==='downloading') return {cls:'text-bg-primary', icon:'fa-download', color:'text-primary'}; + if(status==='stopped') return {cls:'text-bg-secondary', icon:'fa-stop', color:'text-secondary'}; + return t.state ? {cls:'text-bg-success', icon:'fa-play', color:'text-success'} : {cls:'text-bg-secondary', icon:'fa-circle', color:'text-secondary'}; + } + function statusBadge(t){ const m=statusMeta(t); return `${esc(m.label || t.status)}`; } + function torrentWarning(t){ const msg=String(t.message||'').trim(); if(!msg) return null; const l=msg.toLowerCase(); const patterns=['error','failed','failure','timeout','timed out','tracker','could not','cannot','refused','unreachable','denied']; return patterns.some(p=>l.includes(p)) ? msg : null; } + function torrentNameIcon(t){ const m=statusMeta(t); return ``; } + function renderRow(t){ const labels=labelNames(t.label).map(l=>` ${esc(l)}`).join(' '); const warn=torrentWarning(t); const op=activeOperationFor(t); const classes=[selected.has(t.hash)?'selected':'', t.paused?'torrent-paused':'', op?'torrent-operating':'', warn?'torrent-warning':''].filter(Boolean).join(' '); const title=[t.name,warn,op?op.label:''].filter(Boolean).join('\n'); return `${warn?' 
':''}${torrentNameIcon(t)} ${esc(t.name)}${statusBadge(t)}${esc(t.size_h)}${progress(t)}${esc(t.down_rate_h)}${esc(t.up_rate_h)}${esc(t.seeds)}${esc(t.peers)}${esc(t.ratio)}${esc(t.path)}${labels||'-'}${esc(t.ratio_group||'')}`; } + function mobileFilterDefs(){ const arr=[...torrents.values()]; const f=torrentSummary?.filters||{}; const defs=[['all','All',f.all?.count??0],['downloading','Downloading',f.downloading?.count??0],['seeding','Seeding',f.seeding?.count??0],['paused','Paused',f.paused?.count??0],['checking','Checking',f.checking?.count??0],['error','With error',f.error?.count??0],['stopped','Stopped',f.stopped?.count??0]]; const counts=new Map(); arr.forEach(t=>labelNames(t.label).forEach(l=>counts.set(l,(counts.get(l)||0)+1))); [...counts.keys()].sort((a,b)=>a.localeCompare(b)).forEach(l=>defs.push([`label:${l}`,l,counts.get(l),'label'])); return defs; } + function renderMobileFilters(){ const bar=$('mobileFilterBar'); if(!bar) return; const allVisible=visibleRows.length>0 && visibleRows.every(t=>selected.has(t.hash)); const someVisible=visibleRows.some(t=>selected.has(t.hash)); const opts=mobileFilterDefs().map(([key,label,count,type])=>``).join(''); bar.innerHTML=`
${selected.size} selected
`; } + function renderMobile(){ const list=$('mobileList'); if(!list) return; const src=visibleRows.length?visibleRows:[...torrents.values()].filter(rowVisible).sort(compareRows); const rows=src.slice(0,250); renderMobileFilters(); list.innerHTML=rows.map(t=>{ const warn=torrentWarning(t); const op=activeOperationFor(t); const classes=[selected.has(t.hash)?'selected':'', op?'torrent-operating':'', warn?'torrent-warning':''].filter(Boolean).join(' '); return `
${warn?' ':''}${torrentNameIcon(t)} ${esc(t.name)}
${statusBadge(t)} · ${esc(t.progress)}% · Ratio ${esc(t.ratio)}
DL ${esc(t.down_rate_h)} / UL ${esc(t.up_rate_h)}
${esc(t.path)}
${progress(t)}
`; }).join('') || (hasTorrentSnapshot ? `
No torrents.
` : loadingMarkup('Loading torrents...')); } + function renderTable(){ updateBulkBar(); renderCounts(); renderLabelFilters(); updateSortHeaders(); buildVisibleRows(); renderMobile(); const body=$('torrentBody'); if(!visibleRows.length){ body.innerHTML=hasTorrentSnapshot?'No torrents for this filter.':loadingTableRow('Loading torrents...'); return; } const wrap=$('tableWrap'); const start=Math.max(0,Math.floor((wrap?.scrollTop||0)/ROW_HEIGHT)-OVERSCAN); const count=Math.ceil((wrap?.clientHeight||500)/ROW_HEIGHT)+OVERSCAN*2; const end=Math.min(visibleRows.length,start+count); const sig=`${renderVersion}:${start}:${end}:${visibleRows.length}:${sortState.key}:${sortState.dir}:${selected.size}:${activeFilter}:${$('searchBox')?.value||''}:${[...selected].slice(0,30).join(',')}`; if(sig===lastRenderSignature) return; lastRenderSignature=sig; const top=start*ROW_HEIGHT,bottom=Math.max(0,(visibleRows.length-end)*ROW_HEIGHT); body.innerHTML=(top?``:'')+visibleRows.slice(start,end).map(renderRow).join('')+(bottom?``:''); applyColumnVisibility(); } + function scheduleRender(force=false){ if(force){lastRenderSignature='';renderVersion++;} if(renderPending)return; renderPending=true; requestAnimationFrame(()=>{renderPending=false;renderTable();}); } + function patchRows(msg){ if(msg.summary) torrentSummary=msg.summary; (msg.removed||[]).forEach(h=>{torrents.delete(h);selected.delete(h);activeOperations.delete(h);if(selectedHash===h)selectedHash=null;}); (msg.added||[]).forEach(t=>torrents.set(t.hash,t)); (msg.updated||[]).forEach(p=>torrents.set(p.hash,{...(torrents.get(p.hash)||{}),...p})); scheduleRender(true); if(selectedHash&&torrents.has(selectedHash)&&activeTab()==='general') renderGeneral(); } + function selectedHashes(){ return [...selected]; } + function updateBulkBar(){ const bar=$("bulkBar"); if(!bar) return; bar.classList.toggle("d-none", selected.size<=1); const c=$("bulkSelectedCount"); if(c) c.textContent=selected.size; } + function setSelectionRange(hash, 
keepExisting=false){ const current=visibleRows.findIndex(t=>t.hash===hash); const last=visibleRows.findIndex(t=>t.hash===lastSelectedHash); if(current<0 || last<0){ selected.add(hash); lastSelectedHash=hash; return; } if(!keepExisting) selected.clear(); const a=Math.min(current,last), b=Math.max(current,last); visibleRows.slice(a,b+1).forEach(t=>selected.add(t.hash)); selectedHash=hash; } + async function post(url,data,method='POST'){ const res=await fetch(url,{method,headers:{'Content-Type':'application/json'},body:JSON.stringify(data||{})}); const json=await res.json(); if(!json.ok) throw new Error(json.error||'Operation failed'); return json; } + + async function runAction(action, extra={}){ const hashes=selectedHashes(); if(!hashes.length) return toast('No torrents selected','warning'); let payload={hashes,...extra}; if(action==='move'){ openPathPicker('move'); return; } setBusy(true); try{ const j=await post(`/api/torrents/${action}`,payload); markTorrentOperation(hashes, action, j.job_id, 'queued'); if(action==='recheck'){ hashes.forEach(h=>{ const t=torrents.get(h); if(t) torrents.set(h,{...t,status:'Checking',hashing:1,message:'Force recheck queued'}); }); scheduleRender(true); } toast(`${action} queued`,'success'); if(action==='set_label') await loadLabels(); }catch(e){toast(e.message,'danger');} finally{setBusy(false);} } + function flag(iso){ const code=String(iso||'').toLowerCase(); return code?` ${esc(code.toUpperCase())}`:'-'; } + function table(headers,rows){ return `${headers.map(h=>``).join('')}${rows.map(r=>`${r.map(c=>``).join('')}`).join('')}
${esc(h)}
${c}
`; } + function renderGeneral(){ const t=torrents.get(selectedHash); const labels=t?labelNames(t.label).map(l=>` ${esc(l)}`).join(' '):''; $('detailPane').innerHTML=t?`
Name${esc(t.name)}
Hash${esc(t.hash)}
Path${esc(t.path)}
Size${esc(t.size_h)}
Progress${esc(t.progress)}%
Ratio${esc(t.ratio)}
Downloaded${esc(t.down_total_h)}
Uploaded${esc(t.up_total_h)}
Labels${labels||'-'}
Ratio group${esc(t.ratio_group||'')}
`:'Select a torrent.'; } + const FILE_PRIORITY_LABELS = {0: "Skip", 1: "Normal", 2: "High"}; + function priorityClass(priority){ priority=Number(priority||0); return priority===2?"text-bg-success":priority===0?"text-bg-secondary":"text-bg-primary"; } + function renderFilePrioritySelect(f){ const p=Number(f.priority||0); return ``; } + function renderFiles(files){ + const pane=$('detailPane'); + const rows=(files||[]).map(f=>`${esc(f.path)}${esc(f.size_h)}${esc(f.progress??0)}%${esc(FILE_PRIORITY_LABELS[Number(f.priority||0)]||f.priority)}${renderFilePrioritySelect(f)}`).join(''); + pane.innerHTML=`
Changes are applied immediately in rTorrent.
${rows || ''}
PathSizeDonePrioritySet
No files.
`; + } + async function setFilePriorities(items){ + if(!selectedHash || !items.length) return; + setBusy(true); + try{ + const res=await fetch(`/api/torrents/${encodeURIComponent(selectedHash)}/files/priority`,{method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify({files:items})}); + const j=await res.json(); + if(!j.ok || (j.errors&&j.errors.length)) throw new Error(j.errors?.[0]?.error || j.error || 'Priority update failed'); + toast(`Updated ${j.updated?.length||items.length} file priority item(s)`,'success'); + await loadDetails('files'); + }catch(e){ toast(e.message,'danger'); } finally{ setBusy(false); } + } + function peerBadges(p){ + const badges=[]; + if(p.encrypted) badges.push('enc'); + if(p.incoming) badges.push('in'); + if(p.snubbed) badges.push('snub'); + if(p.banned) badges.push('ban'); + return badges.join(' ') || '-'; + } + function renderPeers(peers){ + const rows=(peers||[]).map(p=>[flag(p.country_iso),esc(p.ip),esc(p.country),esc(p.city),esc(p.client),progressBar(p.completed,'peer-progress'),esc(p.down_rate_h),esc(p.up_rate_h),esc(p.port),peerBadges(p),`
`]); + $('detailPane').innerHTML=table(['Flag','IP','Country','City','Client','%','DL','UL','Port','Flags','Actions'],rows); + } + async function peerAction(index, action){ + if(!selectedHash) return; + setBusy(true); + try{ + const j=await post(`/api/torrents/${encodeURIComponent(selectedHash)}/peers/action`,{peer_index:Number(index),action}); + toast(j.message || `Peer ${action} done`,'success'); + await loadDetails('peers'); + }catch(e){ toast(e.message,'danger'); } + finally{ setBusy(false); } + } + function fmtTs(value){ const n=Number(value||0); if(!n) return '-'; try{return new Date(n*1000).toLocaleString();}catch(e){return String(n);} } + function trackerSeedsPeers(t){ const hasScrape = t.seeds !== null || t.peers !== null; return hasScrape ? `${t.seeds ?? "-"} / ${t.peers ?? "-"}` : "-"; } + function renderTrackers(trackers){ + const pane=$('detailPane'); + const rows=(trackers||[]).map(t=>{ + const idx=esc(t.index), url=esc(t.url); + return [`#${idx}`, `
${url || '-'}
`, t.enabled?'yes':'no', esc(trackerSeedsPeers(t)), esc(t.downloaded ?? '-'), fmtTs(t.last_announce), `
`]; + }); + pane.innerHTML=`
${table(['#','URL','On','Seeds / Peers','Done','Last announce','Actions'], rows.length?rows:[[ '-','No trackers.','','','','','' ]])}`; + } + function setTrackerEdit(index,on){ const sel=String(index); document.querySelector(`.tracker-url-view[data-tracker-index="${CSS.escape(sel)}"]`)?.classList.toggle('d-none', on); document.querySelector(`.tracker-url-edit[data-tracker-index="${CSS.escape(sel)}"]`)?.classList.toggle('d-none', !on); document.querySelector(`.tracker-edit-start[data-index="${CSS.escape(sel)}"]`)?.classList.toggle('d-none', on); document.querySelector(`.tracker-edit-save[data-index="${CSS.escape(sel)}"]`)?.classList.toggle('d-none', !on); document.querySelector(`.tracker-edit-cancel[data-index="${CSS.escape(sel)}"]`)?.classList.toggle('d-none', !on); } + async function trackerAction(action,payload={}){ + if(!selectedHash) return toast('No torrent selected','warning'); + setBusy(true); + try{ + const j=await post(`/api/torrents/${encodeURIComponent(selectedHash)}/trackers/${action}`,payload); + toast(j.message || `Tracker ${action} done`,'success'); + await loadDetails('trackers'); + }catch(e){toast(e.message,'danger');} + finally{setBusy(false);} + } + async function loadDetails(tab){ const t=torrents.get(selectedHash); if($('peersRefreshBox')) $('peersRefreshBox').classList.toggle('d-none', tab!=='peers'); setupPeersRefresh(tab); if(!t)return; if(tab==='general') return renderGeneral(); if(tab==='log'){ $('detailPane').innerHTML=`
${esc(t.message||'No logs')}
`; return; } const pane=$('detailPane'); pane.innerHTML=`
Loading ${esc(tab)}...
`; try{ const res=await fetch(`/api/torrents/${encodeURIComponent(selectedHash)}/${tab}`,{headers:{'Accept':'application/json'}}); const text=await res.text(); let json; try{ json=JSON.parse(text); }catch(parseErr){ throw new Error(`Invalid API response for ${tab}. HTTP ${res.status}`); } if(!res.ok || !json.ok) throw new Error(json.error||`HTTP ${res.status}`); if(tab!==activeTab()) return; if(tab==='files') renderFiles(json.files||[]); if(tab==='peers') renderPeers(json.peers||[]); if(tab==='trackers') renderTrackers(json.trackers||[]); }catch(e){pane.innerHTML=`
${esc(e.message)}
`;} } + function copyText(text){ + text=String(text ?? ''); + if(navigator.clipboard && window.isSecureContext){ + return navigator.clipboard.writeText(text); + } + return new Promise((resolve,reject)=>{ + const ta=document.createElement('textarea'); + ta.value=text; ta.setAttribute('readonly',''); + ta.style.position='fixed'; ta.style.left='-9999px'; ta.style.top='0'; + document.body.appendChild(ta); ta.focus(); ta.select(); + try{ document.execCommand('copy') ? resolve() : reject(new Error('copy command failed')); } + catch(e){ reject(e); } + finally{ ta.remove(); } + }); + } + function copySelected(field){ + const t=torrents.get(selectedHash); + if(!t) return toast('No torrent selected','warning'); + const value=String(t[field] ?? ''); + if(!value) return toast(`No ${field} to copy`,'warning'); + copyText(value).then(()=>toast(`Copied ${field}`,'success')).catch(()=>toast('Copy failed','danger')); + } + + async function getDefaultDownloadPath(){ if(defaultDownloadPath) return defaultDownloadPath; try{ const j=await (await fetch('/api/path/default')).json(); if(j.ok && j.path) defaultDownloadPath=j.path; }catch(e){} return defaultDownloadPath || '/'; } + async function applyDefaultDownloadPath(force=false){ const p=await getDefaultDownloadPath(); ['addPath','rssPath','autoEffectPath'].forEach(id=>{ const el=$(id); if(el && (force || !el.value)) el.value=p; }); return p; } + async function openPathPicker(target){ pathTarget=target; const def=await getDefaultDownloadPath(); const initial=def || ($(target)?.value||'/'); $('moveOptions')?.classList.toggle('d-none', target!=='move'); if($('moveDataPhysical')) $('moveDataPhysical').checked=true; if($('moveRecheck')) $('moveRecheck').checked=true; new bootstrap.Modal($('pathModal')).show(); browsePath(initial); } + async function browsePath(path){ $('pathList').innerHTML=' Loading...'; try{ const res=await fetch(`/api/path/browse?path=${encodeURIComponent(path||'/')}`); const j=await res.json(); if(!j.ok) throw new 
Error(j.error); $('pathCurrent').value=j.path; lastPathParent=j.parent; $('pathList').innerHTML=j.dirs.map(d=>`
${esc(d.name)}
`).join('')||'
No directories.
'; }catch(e){$('pathList').innerHTML=`
${esc(e.message)}
`;} } + $('pathList')?.addEventListener('click',e=>{const r=e.target.closest('.path-row'); if(r) browsePath(r.dataset.path);}); $('pathGoBtn')?.addEventListener('click',()=>browsePath($('pathCurrent').value)); $('pathUpBtn')?.addEventListener('click',()=>browsePath(lastPathParent)); $('pathReloadBtn')?.addEventListener('click',()=>browsePath($('pathCurrent').value)); $('pathSelectBtn')?.addEventListener('click',async()=>{const p=$('pathCurrent').value; if(pathTarget==='move'){ const hashes=selectedHashes(); const j=await post('/api/torrents/move',{hashes,path:p,move_data:!!($('moveDataPhysical')?.checked),recheck:!!($('moveRecheck')?.checked)}); markTorrentOperation(hashes,'move',j.job_id,'queued'); toast($('moveDataPhysical')?.checked?'physical move queued':'move queued','success'); } else if($(pathTarget)) $(pathTarget).value=p; bootstrap.Modal.getInstance($('pathModal'))?.hide();}); document.querySelectorAll('.browse-path').forEach(b=>b.addEventListener('click',()=>openPathPicker(b.dataset.target))); + + function renderColumnManager(){ const box=$('columnManager'); if(!box) return; box.innerHTML=COLUMN_DEFS.map(([key,label])=>``).join(''); } + $('saveColumnsBtn')?.addEventListener('click',async()=>{ document.querySelectorAll('.column-toggle').forEach(cb=>cb.checked?hiddenColumns.delete(cb.dataset.colKey):hiddenColumns.add(cb.dataset.colKey)); applyColumnVisibility(); scheduleRender(true); await post('/api/preferences',{table_columns_json:JSON.stringify({hidden:[...hiddenColumns]})}).catch(e=>toast(e.message,'danger')); toast('Columns saved','success'); }); + $('resetColumnsBtn')?.addEventListener('click',async()=>{ hiddenColumns.clear(); renderColumnManager(); applyColumnVisibility(); scheduleRender(true); await post('/api/preferences',{table_columns_json:JSON.stringify({hidden:[]})}).catch(()=>{}); }); + + async function loadJobs(page=jobsPage){ const box=$('jobsTable'); if(!box)return; jobsPage=Math.max(0,page|0); box.innerHTML=' Loading jobs...'; const 
offset=jobsPage*jobsLimit; const j=await (await fetch(`/api/jobs?limit=${jobsLimit}&offset=${offset}`)).json(); const rows=j.jobs||[]; jobsTotal=Number(j.total||rows.length); const details=r=>{ const count=Number(r.hash_count||0); if(r.is_bulk || count>1) return `bulk
${esc(count)} torrent(s), details hidden`; const bits=[]; if(count) bits.push(`${esc(count)} torrent`); if(r.summary) bits.push(esc(r.summary)); return bits.join('
') || '-'; }; box.innerHTML=table(['Status','Action','Profile','Count','Details','Attempts','Started','Finished','Error','Actions'],rows.map(r=>[`${esc(r.status)}`,esc(r.action),esc(r.profile_id),esc(r.hash_count||0),details(r),esc(r.attempts||0),dateCell(r.started_at||r.created_at),dateCell(r.finished_at||r.updated_at),compactCell(r.error||'',140),` `])); renderJobsPager(); } + function renderJobsPager(){ const p=$('jobsPager'); if(!p)return; const pages=Math.max(1,Math.ceil(jobsTotal/jobsLimit)); p.innerHTML=`
Page ${jobsPage+1} / ${pages} · ${jobsTotal} jobs
`; $('jobsPrev')?.addEventListener('click',()=>loadJobs(jobsPage-1)); $('jobsNext')?.addEventListener('click',()=>loadJobs(jobsPage+1)); } + $('jobsModal')?.addEventListener('show.bs.modal',loadJobs); $('refreshJobsBtn')?.addEventListener('click',loadJobs); $('jobsTable')?.addEventListener('click',async e=>{ const btn=e.target.closest('.job-retry,.job-cancel'); if(!btn)return; const id=btn.dataset.id; if(!id)return; if(btn.classList.contains('job-retry')) await post(`/api/jobs/${id}/retry`,{}).catch(x=>toast(x.message,'danger')); if(btn.classList.contains('job-cancel')) await post(`/api/jobs/${id}/cancel`,{}).catch(x=>toast(x.message,'danger')); loadJobs(); }); + $('clearJobsBtn')?.addEventListener('click',async()=>{ if(!confirm('Clear finished job logs? Pending and running jobs will stay.')) return; try{ const j=await post('/api/jobs/clear',{}); toast(`Cleared ${j.deleted||0} job log(s)`,'success'); jobsPage=0; loadJobs(0); }catch(e){ toast(e.message,'danger'); } }); + + async function loadLabels(){ const j=await (await fetch('/api/labels')).json(); const labels=j.labels||[]; knownLabels=labels; renderLabelFilters(); renderLabelChooser(); if($('labelsManager')) $('labelsManager').innerHTML=labels.length?labels.map(l=>`
${esc(l.name)}
`).join(''):'No labels.'; } + function renderLabelChooser(){ if($('selectedLabelList')) $('selectedLabelList').innerHTML=[...modalLabels].map(l=>``).join('') || 'No labels selected.'; if($('labelList')) $('labelList').innerHTML=knownLabels.map(l=>``).join('') || 'No saved labels.'; } + async function saveKnownLabel(name){ name=String(name||'').trim(); if(!name) return; await post('/api/labels',{name}); await loadLabels(); } + async function loadRatios(){ const j=await (await fetch('/api/ratio-groups')).json(); const groups=j.groups||[]; if($('ratioAssignSelect')) $('ratioAssignSelect').innerHTML=groups.map(g=>``).join(''); if($('ratioManager')) $('ratioManager').innerHTML=table(['Name','Min','Max','Seed min','Action','Enabled'],groups.map(g=>[esc(g.name),esc(g.min_ratio),esc(g.max_ratio),esc(g.seed_time_minutes),esc(g.action),g.enabled?'yes':'no'])); } + $('labelModal')?.addEventListener('show.bs.modal',async()=>{ modalLabels=new Set(selectedHashes().flatMap(h=>labelNames(torrents.get(h)?.label))); if($('labelInput')) $('labelInput').value=''; await loadLabels(); renderLabelChooser(); }); + $('saveLabelBtn')?.addEventListener('click',async()=>{ const typed=($('labelInput')?.value||'').split(/[,;|]+/).map(x=>x.trim()).filter(Boolean); for(const l of typed){ modalLabels.add(l); await saveKnownLabel(l); } await runAction('set_label',{label:labelValue([...modalLabels])}); bootstrap.Modal.getInstance($('labelModal'))?.hide(); }); + $('addLabelToSelectionBtn')?.addEventListener('click',async()=>{ const typed=($('labelInput')?.value||'').split(/[,;|]+/).map(x=>x.trim()).filter(Boolean); for(const l of typed){ modalLabels.add(l); await saveKnownLabel(l); } if($('labelInput')) $('labelInput').value=''; renderLabelChooser(); }); + $('clearLabelsBtn')?.addEventListener('click',()=>{ modalLabels.clear(); renderLabelChooser(); }); + $('labelList')?.addEventListener('click',e=>{ const chip=e.target.closest('.label-chip'); if(!chip) return; const v=chip.dataset.label||''; 
modalLabels.has(v)?modalLabels.delete(v):modalLabels.add(v); renderLabelChooser(); }); + $('selectedLabelList')?.addEventListener('click',e=>{ const chip=e.target.closest('.label-selected'); if(!chip) return; modalLabels.delete(chip.dataset.label||''); renderLabelChooser(); }); + $('newLabelBtn')?.addEventListener('click',async()=>{ await saveKnownLabel($('newLabelName')?.value||''); if($('newLabelName')) $('newLabelName').value=''; }); + $('ratioAssignModal')?.addEventListener('show.bs.modal',loadRatios); $('applyRatioBtn')?.addEventListener('click',async()=>{ await runAction('set_ratio_group',{ratio_group:$('ratioAssignSelect').value}); bootstrap.Modal.getInstance($('ratioAssignModal'))?.hide(); }); $('ratioSaveBtn')?.addEventListener('click',async()=>{ await post('/api/ratio-groups',{name:$('ratioName').value,min_ratio:$('ratioMin').value,max_ratio:$('ratioMax').value,seed_time_minutes:$('ratioSeed').value,action:$('ratioAction').value}); loadRatios(); }); + async function loadRss(){ const j=await (await fetch('/api/rss')).json(); const feeds=j.feeds||[], rules=j.rules||[]; if($('rssManager')) $('rssManager').innerHTML=`
Feeds
${table(['Name','URL','Last error'],feeds.map(f=>[esc(f.name),esc(f.url),esc(f.last_error||'')]))}
Rules
${table(['Name','Pattern','Path','Label'],rules.map(r=>[esc(r.name),esc(r.pattern),esc(r.save_path),esc(r.label)]))}`; } + + async function loadSmartQueue(){ if($('smartManager')) $('smartManager').innerHTML=loadingMarkup('Loading Smart Queue...'); if($('smartHistory')) $('smartHistory').innerHTML=loadingMarkup('Loading Smart Queue history...'); const historyLimit=smartHistoryExpanded?100:10; const j=await (await fetch(`/api/smart-queue?history_limit=${historyLimit}`)).json(); if(!j.ok) return; const st=j.settings||{}, ex=j.exclusions||[], hist=j.history||[]; const totalHistory=Number(j.history_total ?? hist.length); if($('smartEnabled')) $('smartEnabled').checked=!!st.enabled; if($('smartMaxActive')) $('smartMaxActive').value=st.max_active_downloads||5; if($('smartStalled')) $('smartStalled').value=st.stalled_seconds||300; if($('smartMinSpeed')) $('smartMinSpeed').value=Math.round((st.min_speed_bytes||0)/1024); if($('smartMinSeeds')) $('smartMinSeeds').value=st.min_seeds||1; if($('smartManager')) $('smartManager').innerHTML=ex.length?table(['Hash','Reason','Created','Action'],ex.map(x=>[esc(x.torrent_hash),esc(x.reason||''),dateCell(x.created_at),``])):'
No Smart Queue exceptions. Select torrents and use Exclude selected to keep them outside the queue.
'; if($('smartHistory')) { const body=hist.length?table(['Time','Event','Checked','Paused','Resumed'],hist.map(h=>[dateCell(h.created_at),esc(h.event),esc(h.checked_count||0),esc(h.paused_count||0),esc(h.resumed_count||0)])):'
No Smart Queue operations yet.
'; const canToggle=totalHistory>10; const toggle=canToggle?``:''; $('smartHistory').innerHTML=`${body}${toggle}`; } } + async function setSmartException(hashes, excluded, reason='manual'){ const list=[...new Set(hashes||[])].filter(Boolean); if(!list.length) return toast('No torrents selected','warning'); setBusy(true); try{ for(const h of list) await post('/api/smart-queue/exclusion',{hash:h,excluded,reason}); toast(excluded?'Smart Queue exception added':'Smart Queue exception removed','success'); await loadSmartQueue(); }catch(e){toast(e.message,'danger');} finally{setBusy(false);} } + async function saveSmartQueue(){ await post('/api/smart-queue',{enabled:$('smartEnabled')?.checked,max_active_downloads:$('smartMaxActive')?.value,stalled_seconds:$('smartStalled')?.value,min_speed_bytes:Math.round(Number($('smartMinSpeed')?.value||0)*1024),min_seeds:$('smartMinSeeds')?.value}); toast('Smart Queue saved','success'); await loadSmartQueue(); } + function normalizeRtConfigValue(value, type='text'){ + const raw=String(value ?? '').trim(); + if(type==='bool') return ['1','true','yes','on'].includes(raw.toLowerCase()) ? '1' : '0'; + if(type==='number'){ + if(raw==='') return '0'; + const normalized=Number(raw.replace(',', '.')); + return Number.isFinite(normalized) ? String(Math.trunc(normalized)) : raw; + } + return raw; + } + function rtConfigInputValue(input){ + return normalizeRtConfigValue(input.value, input.dataset.type || rtConfigFieldTypes.get(input.dataset.key) || 'text'); + } + function rtConfigOriginalValue(input){ + const key=input.dataset.key; + return normalizeRtConfigValue(input.dataset.original ?? 
rtConfigOriginal.get(key), input.dataset.type || rtConfigFieldTypes.get(key) || 'text'); + } + function collectRtConfigChanges(){ + const values={}; + document.querySelectorAll('.rt-config-input').forEach(input=>{ + if(input.disabled) return; + const cur=rtConfigInputValue(input); + const orig=rtConfigOriginalValue(input); + if(cur!==orig) values[input.dataset.key]=cur; + }); + return values; + } + function collectRtConfigClearKeys(){ + const keys=[]; + document.querySelectorAll('.rt-config-input').forEach(input=>{ + if(input.disabled || input.dataset.saved!=='true') return; + const cur=rtConfigInputValue(input); + const orig=rtConfigOriginalValue(input); + if(cur===orig) keys.push(input.dataset.key); + }); + return keys; + } + function updateRtConfigDirty(){ + const changed=collectRtConfigChanges(); + const clearKeys=collectRtConfigClearKeys(); + document.querySelectorAll('.rt-config-input').forEach(input=>{ + const row=input.closest('.rt-config-row'); + if(row) row.classList.toggle('changed', Object.prototype.hasOwnProperty.call(changed,input.dataset.key)); + }); + const configChanges=Object.keys(changed).length; + const applyChanged=!!$('rtConfigApplyOnStart') && $('rtConfigApplyOnStart').checked!==rtConfigOriginalApplyOnStart; + const total=configChanges + clearKeys.length + (applyChanged ? 
1 : 0); + if($('rtConfigChangedCount')) $('rtConfigChangedCount').textContent=total?`${total} changed`:'No changes'; + if($('rtConfigGenerateBtn')) $('rtConfigGenerateBtn').disabled=!configChanges; + if($('rtConfigSaveBtn')) $('rtConfigSaveBtn').disabled=!total; + } + async function loadRtConfig(){ + const box=$('rtConfigManager'); + if(!box)return; + box.innerHTML=' Loading config...'; + try{ + const j=await (await fetch('/api/rtorrent-config')).json(); + if(!j.ok) throw new Error(j.error||'Config load failed'); + const fields=j.config?.fields||[]; + rtConfigOriginal=new Map(); + rtConfigFieldTypes=new Map(); + rtConfigOriginalApplyOnStart=!!j.config?.apply_on_start; + let lastGroup=''; + const html=fields.map(f=>{ + const group=f.group||'Other'; + const head=group!==lastGroup?`
${esc(group)}
`:''; + lastGroup=group; + const disabled=(!f.ok||f.readonly)?'disabled':''; + const type=['bool','number'].includes(f.type)?f.type:'text'; + const originalValue=normalizeRtConfigValue(f.baseline_value ?? f.current_value ?? f.value, type); + const displayValue=normalizeRtConfigValue(f.saved ? f.saved_value : (f.value ?? f.current_value), type); + rtConfigOriginal.set(f.key, originalValue); + rtConfigFieldTypes.set(f.key, type); + const note=f.ok?(f.readonly?' · read only':(f.saved?' · saved override · reference kept':'')):' · unavailable'; + const valueNote=f.saved?`Reference: ${esc(originalValue)} → saved: ${esc(displayValue)}`:''; + const originalAttr=esc(originalValue); + const input=type==='bool' + ? `` + : ``; + return `${head}`; + }).join(''); + box.innerHTML=`
${html}
`; + if($('rtConfigApplyOnStart')) $('rtConfigApplyOnStart').checked=rtConfigOriginalApplyOnStart; + updateRtConfigDirty(); + }catch(e){ box.innerHTML=`
${esc(e.message)}
`; } + }
// NOTE(review): leading "+" tokens are leftover unified-diff markers; the HTML inside the
// template literals appears stripped from this residue. Comments only — code unchanged.
// saveRtConfig(): collect changed keys plus keys whose saved override should be cleared,
// re-send cleared keys at their reference values, then POST to /api/rtorrent-config with
// apply_now=true and reload the panel.
 + async function saveRtConfig(){ + const values=collectRtConfigChanges(); + const clear_keys=collectRtConfigClearKeys(); + clear_keys.forEach(key=>{ + const input=document.querySelector(`.rt-config-input[data-key="${CSS.escape(key)}"]`); + if(input) values[key]=rtConfigOriginalValue(input); + }); + setBusy(true); + try{ + const j=await post('/api/rtorrent-config',{values,clear_keys,apply_on_start:!!$('rtConfigApplyOnStart')?.checked,apply_now:true}); + toast(`rTorrent config saved (${j.result?.updated?.length||0})`,'success'); + await loadRtConfig(); + }catch(e){ + toast(e.message,'danger'); + } finally{ + setBusy(false); + } + }
// generateRtConfig(): POST only the changed values and show the generated config text in
// #rtConfigOutput; errors surface as a toast.
 + async function generateRtConfig(){ const values=collectRtConfigChanges(); try{ const res=await fetch('/api/rtorrent-config/generate',{method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify({values})}); const j=await res.json(); if(!j.ok) throw new Error(j.error||'Generate failed'); if($('rtConfigOutput')) $('rtConfigOutput').value=j.config_text||''; toast('Config generated','success'); }catch(e){ toast(e.message,'danger'); } }
// bootstrapThemeUrl(theme): jsDelivr URL for the chosen Bootswatch theme, or stock
// Bootstrap 5.3.3 CSS when theme is falsy/"default".
 + + function bootstrapThemeUrl(theme){ return theme && theme !== "default" ? 
`https://cdn.jsdelivr.net/npm/bootswatch@5.3.3/dist/${encodeURIComponent(theme)}/bootstrap.min.css` : "https://cdn.jsdelivr.net/npm/bootstrap@5.3.3/dist/css/bootstrap.min.css"; }
// applyBootstrapTheme()/applyFontFamily(): apply the selection to the live DOM (stylesheet
// href / data-app-font attribute) and keep the settings selects in sync with the
// module-level bootstrapTheme / fontFamily state.
 + function applyBootstrapTheme(theme){ bootstrapTheme = theme || "default"; const link=$("bootstrapThemeStylesheet"); if(link) link.href = bootstrapThemeUrl(bootstrapTheme); if($("bootstrapThemeSelect")) $("bootstrapThemeSelect").value = bootstrapTheme; } + function applyFontFamily(font){ fontFamily = font || "default"; document.documentElement.dataset.appFont = fontFamily; if($("fontFamilySelect")) $("fontFamilySelect").value = fontFamily; }
// saveAppearancePreferences(): apply both selections immediately, then persist them via
// POST /api/preferences.
 + async function saveAppearancePreferences(){ applyBootstrapTheme($("bootstrapThemeSelect")?.value || "default"); applyFontFamily($("fontFamilySelect")?.value || "default"); try{ await post("/api/preferences",{bootstrap_theme:bootstrapTheme,font_family:fontFamily}); toast("Appearance preferences saved","success"); }catch(e){ toast(e.message,"danger"); } }
// setupPeersRefresh(tab): clear any existing interval, then re-arm the peers auto-refresh
// timer only while the Peers tab is active and a refresh period (> 0 s) is configured.
 + + function setupPeersRefresh(tab=activeTab()){ clearInterval(peersRefreshTimer); peersRefreshTimer=null; if($('peersRefreshSelect')) $('peersRefreshSelect').value=String(peersRefreshSeconds||0); if(tab==='peers' && peersRefreshSeconds>0){ peersRefreshTimer=setInterval(()=>{ if(activeTab()==='peers' && selectedHash) loadDetails('peers'); }, peersRefreshSeconds*1000); } }
// syncMobileMode(): toggle the mobile layout from a 900px media query OR the manual
// override class, then force a re-render.
 + function syncMobileMode(){ const auto=window.matchMedia&&window.matchMedia("(max-width: 900px)").matches; document.body.classList.toggle("mobile-mode", auto || document.body.classList.contains("mobile-mode-manual")); scheduleRender(true); }
// automationCondition(): read the automation condition form into {type, ...}; the
// status/path_contains branches continue on the next line of this residue.
 + + + function automationCondition(){ const type=$('autoConditionType')?.value||'completed'; const cond={type}; if(type==='no_seeds'){cond.seeds=Number($('autoCondSeeds')?.value||0);cond.minutes=Number($('autoCondMinutes')?.value||0);} if(type==='ratio_gte')cond.ratio=Number($('autoCondRatio')?.value||1); if(type==='label_missing'||type==='label_has')cond.label=$('autoCondLabel')?.value||''; 
// NOTE(review): diff residue — leading "+" tokens are unified-diff markers and the HTML in
// the template literals is missing; comments below annotate intent only, code unchanged.
// Tail of automationCondition(): status / path_contains fields, then return the condition.
if(type==='status')cond.status=$('autoCondStatus')?.value||'Seeding'; if(type==='path_contains')cond.text=$('autoCondText')?.value||''; return cond; }
// automationEffect(): read the effect form into {type, path|label|labels}.
 + function automationEffect(){ const type=$('autoEffectType')?.value||'add_label'; const eff={type}; if(type==='move')eff.path=$('autoEffectPath')?.value||''; if(type==='add_label'||type==='remove_label')eff.label=$('autoEffectLabel')?.value||''; if(type==='set_labels')eff.labels=$('autoEffectLabels')?.value||''; return eff; }
// updateAutomationForm(): show only the inputs whose comma-separated data-auto-cond /
// data-auto-effect list contains the currently selected condition / effect type.
 + function updateAutomationForm(){ const ct=$('autoConditionType')?.value||''; document.querySelectorAll('[data-auto-cond]').forEach(el=>el.classList.toggle('d-none', !el.dataset.autoCond.split(',').includes(ct))); const et=$('autoEffectType')?.value||''; document.querySelectorAll('[data-auto-effect]').forEach(el=>el.classList.toggle('d-none', !el.dataset.autoEffect.split(',').includes(et))); }
// ruleSummary(r): human-readable "conditions → effects" one-liner for an automation rule.
 + function ruleSummary(r){ const cs=(r.conditions||[]).map(c=>c.type==='no_seeds'?`no seeds <=${c.seeds||0} for ${c.minutes||0} min`:c.type==='ratio_gte'?`ratio >= ${c.ratio}`:c.type==='label_missing'?`missing label ${c.label||''}`:c.type==='label_has'?`has label ${c.label||''}`:c.type==='status'?`status ${c.status||''}`:c.type==='path_contains'?`path contains ${c.text||''}`:'completed').join(' + '); const es=(r.effects||[]).map(e=>e.type==='move'?`move to ${e.path||'default path'}`:e.type==='add_label'?`add label ${e.label||''}`:e.type==='remove_label'?`remove label ${e.label||''}`:e.type==='set_labels'?`set labels ${e.labels||''}`:e.type).join(' + '); return `${cs} → ${es}`; }
// loadAutomations(): GET /api/automations and render the rule cards and the history table
// (markup inside these template literals is lost in this residue).
 + async function loadAutomations(){ const j=await (await fetch('/api/automations')).json(); const rules=j.rules||[], hist=j.history||[]; if($('automationManager')) $('automationManager').innerHTML=rules.length?rules.map(r=>`
${esc(r.name)} ${r.enabled?'on':'off'}
${esc(ruleSummary(r))} · cooldown ${esc(r.cooldown_minutes||0)} min
`).join(''):'
No automation rules.
'; if($('automationHistory')) $('automationHistory').innerHTML=hist.length?table(['Time','Rule','Torrent','Actions'],hist.map(h=>[esc(h.created_at),esc(h.rule_name||''),esc(h.torrent_name||h.torrent_hash||''),esc(h.actions_json||'')])):'
No automation history yet.
'; }
// saveAutomation(): compose one rule from the form (single condition + single effect),
// POST it to /api/automations, then refresh the rule list.
 + async function saveAutomation(){ const payload={name:$('autoName')?.value||'Automation rule',enabled:!!$('autoEnabled')?.checked,cooldown_minutes:Number($('autoCooldown')?.value||60),conditions:[automationCondition()],effects:[automationEffect()]}; setBusy(true); try{ await post('/api/automations',payload); toast('Automation rule saved','success'); await loadAutomations(); }catch(e){toast(e.message,'danger');} finally{setBusy(false);} }
// cleanupCountCard(): small stat-card markup helper (HTML body lost in this residue;
// template continues on the next line).
 + + + function cleanupCountCard(label, value, note=''){ + return `
${esc(label)}${esc(value ?? 0)}${note?`${esc(note)}`:''}
`; + }
// NOTE(review): "+" tokens are unified-diff markers; template-literal HTML is stripped in
// this residue. Comments only — code unchanged.
// renderCleanup(data): build the four cleanup stat cards (job totals, clearable jobs,
// Smart Queue logs, DB size) and inject them into #cleanupManager.
 + function renderCleanup(data={}){ + const box=$('cleanupManager'); if(!box) return; + const retention=data.retention_days||{}; + const db=data.database||{}; + const cards=[ + cleanupCountCard('Job logs total', data.jobs_total, `retention ${retention.jobs||'-'} days`), + cleanupCountCard('Job logs clearable', data.jobs_clearable, 'done / failed / cancelled'), + cleanupCountCard('Smart Queue logs', data.smart_queue_history_total, `retention ${retention.smart_queue_history||'-'} days`), + cleanupCountCard('Database size', db.size_h||db.size||'-', db.path||'') + ]; + box.innerHTML=`
${cards.join('')}
Job cleanup uses the existing job endpoint logic, so pending and running jobs are preserved.
`; + }
// loadCleanup(): GET /api/cleanup/summary and render it; on failure the error message
// replaces the panel content.
 + async function loadCleanup(){ + const box=$('cleanupManager'); if(!box) return; + box.innerHTML=' Loading cleanup data...'; + try{ + const j=await (await fetch('/api/cleanup/summary')).json(); + if(!j.ok) throw new Error(j.error||'Cleanup summary failed'); + renderCleanup(j.cleanup||{}); + }catch(e){ box.innerHTML=`
${esc(e.message)}
`; } + }
// runCleanupAction(endpoint, label): confirm with the user, POST the cleanup endpoint,
// toast a per-key summary of deleted counts, re-render the cards, and best-effort refresh
// the jobs / Smart Queue views depending on which endpoint was hit.
 + async function runCleanupAction(endpoint, label){ + if(!confirm(`${label}?`)) return; + setBusy(true); + try{ + const j=await post(endpoint,{}); + const deleted=typeof j.deleted==='object' ? Object.entries(j.deleted).map(([k,v])=>`${k}: ${v}`).join(', ') : String(j.deleted ?? 0); + toast(`Cleanup done (${deleted})`,'success'); + renderCleanup(j.cleanup||{}); + if(endpoint.includes('/jobs')){ jobsPage=0; loadJobs(0).catch(()=>{}); } + if(endpoint.includes('/smart-queue')) loadSmartQueue().catch(()=>{}); + }catch(e){ toast(e.message,'danger'); } + finally{ setBusy(false); } + }
// diagCard(): diagnostics stat-card helper (markup lost in this residue; template
// continues on the next line).
 + + function diagCard(label,value,extra=''){ return `
${esc(label)}${esc(value ?? '-')}
`; } + + function portStatusLabel(st){ return st==='open'?'open':st==='closed'?'closed':st==='disabled'?'disabled':st==='error'?'error':'unknown'; } + function portStatusClass(st){ return st==='open'?'port-ok':st==='closed'?'port-bad':'port-secondary'; } + function portStatusIcon(st){ return st==='open'?'fa-circle-check':st==='closed'?'fa-circle-xmark':'fa-circle-question'; } + function portStatusBadge(data={},attrs='',withPort=false){ const st=portStatusLabel(data.status); const port=data.port?String(data.port):'-'; const label=withPort?`Port ${port} ${st}`:st; return ` ${esc(label)}`; } + function portCheckedAt(data={}){ if(data.checked_at) return String(data.checked_at).replace('T',' ').replace(/\+00:00$/,' UTC'); if(data.checked_at_epoch) return new Date(Number(data.checked_at_epoch)*1000).toLocaleString(); return ''; } + function portCheckDetails(data={}){ const bits=[]; if(data.port) bits.push(`Port: ${data.port}`); if(data.public_ip) bits.push(`Public IP: ${data.public_ip}`); if(data.remote) bits.push('Remote profile'); if(data.source) bits.push(`Source: ${data.source}`); const checked=portCheckedAt(data); if(checked) bits.push(`Last check: ${checked}`); if(data.cached) bits.push('Cached result'); if(data.error) bits.push(data.error); if(data.fallback_error) bits.push(data.fallback_error); return bits; } + function renderPortCheck(data={}){ + if($('portCheckEnabled')) $('portCheckEnabled').checked=!!data.enabled; + const details=portCheckDetails(data); + const title=details.join(' · ') || 'Port check disabled'; + if($('portCheckBadge')) $('portCheckBadge').outerHTML=portStatusBadge(data,'id="portCheckBadge" '); + if($('portCheckInfo')) $('portCheckInfo').textContent=details.join(' · ') || 'Uses YouGetSignal first. 
Manual check bypasses the 6h cache.'; + if($('statusPortCheck')){ + $('statusPortCheck').classList.toggle('d-none', !data.enabled); + $('statusPortCheck').title=title; + } + if($('statusPortCheckBadge')) $('statusPortCheckBadge').outerHTML=portStatusBadge(data,'id="statusPortCheckBadge" ',true); + } + async function loadPreferences(){ if($('portCheckEnabled')) $('portCheckEnabled').checked=portCheckEnabled; applyBootstrapTheme(bootstrapTheme); applyFontFamily(fontFamily); await loadPortCheck(false); } + async function savePortCheckPref(){ portCheckEnabled=!!$('portCheckEnabled')?.checked; try{ await post('/api/preferences',{port_check_enabled:portCheckEnabled}); toast('Preferences saved','success'); await loadPortCheck(false); }catch(e){ toast(e.message,'danger'); } } + async function loadPortCheck(force=false){ try{ const res=force?await post('/api/port-check',{}):await (await fetch('/api/port-check')).json(); if(!res.ok) throw new Error(res.error||'Port check failed'); renderPortCheck(res.port_check||{}); }catch(e){ renderPortCheck({status:'error',enabled:portCheckEnabled,error:e.message}); } } + async function loadAppStatus(){ + const box=$('appStatusManager'); if(!box) return; + box.innerHTML=' Loading diagnostics...'; + try{ + const j=await (await fetch('/api/app/status')).json(); + if(!j.ok) throw new Error(j.error||'Failed to load diagnostics'); + const st=j.status||{}, py=st.pytorrent||{}, scgi=st.scgi||{}, profile=st.profile||{}, pc=st.port_check||{}, cleanup=st.cleanup||{}, db=cleanup.database||{}; + const cards=[ + diagCard('pyTorrent PID', py.pid), diagCard('pyTorrent uptime', `${py.uptime_seconds||0}s`), diagCard('Memory RSS', py.memory_rss_h||py.memory_rss), + diagCard('Threads', py.threads), diagCard('CPU', `${py.cpu_percent ?? 
'-'}%`), diagCard('Jobs total', py.jobs_total), + diagCard('Worker threads', py.worker_threads), diagCard('Python', py.python||'-'), diagCard('DB size', db.size_h||'-'), + diagCard('Active profile', profile.name||profile.id||'-'), diagCard('API response time', `${st.api_ms ?? '-'} ms`), + diagCard('Job logs clearable', cleanup.jobs_clearable ?? '-'), diagCard('Smart Queue logs', cleanup.smart_queue_history_total ?? '-'), + diagCard('Port check', portStatusLabel(pc.status), pc.status==='closed'?'diag-error':''), diagCard('Incoming port', pc.port||'-'), diagCard('Port check source', pc.source||(pc.enabled?'unknown':'disabled')), + diagCard('SCGI status', scgi.ok?'OK':'ERROR', scgi.ok?'':'diag-error'), diagCard('SCGI URL', scgi.url||'-'), diagCard('SCGI connect', scgi.connect_ms!=null?`${scgi.connect_ms} ms`:'-'), + diagCard('SCGI first byte', scgi.first_byte_ms!=null?`${scgi.first_byte_ms} ms`:'-'), diagCard('SCGI total', scgi.total_ms!=null?`${scgi.total_ms} ms`:'-'), + diagCard('Request bytes', scgi.request_bytes), diagCard('Response bytes', scgi.response_bytes), diagCard('XML bytes', scgi.xml_bytes), diagCard('rTorrent version', scgi.client_version||'-') + ]; + box.innerHTML=`
${cards.join('')}
${scgi.error?`
${esc(scgi.error)}
`:''}`; + }catch(e){ box.innerHTML=`
${esc(e.message)}
`; } + } + + $('toolsModal')?.addEventListener('show.bs.modal',()=>{refreshProfiles();loadLabels();loadRatios();loadRss();loadSmartQueue();loadRtConfig();loadAutomations();loadCleanup();loadAppStatus();loadPreferences();renderColumnManager();applyColumnVisibility();updateAutomationForm();}); const toolPanelIds={rtorrents:'toolRtorrents',settings:'toolRtorrents',preferences:'toolPreferences',labels:'toolLabels',ratio:'toolRatio',rss:'toolRss',columns:'toolColumns',smart:'toolSmart',automations:'toolAutomations',rtconfig:'toolRtconfig',cleanup:'toolCleanup',appstatus:'toolAppstatus'}; const hideToolPanels=()=>Object.values(toolPanelIds).filter((v,i,a)=>a.indexOf(v)===i).forEach(id=>$(id)?.classList.add('d-none')); const showToolPanel=tool=>{hideToolPanels(); $(toolPanelIds[tool]||'toolRtorrents')?.classList.remove('d-none');}; document.querySelectorAll('.tool-tab').forEach(b=>b.addEventListener('click',()=>{const tool=b.dataset.tool||'rtorrents'; document.querySelectorAll('.tool-tab').forEach(x=>x.classList.remove('active')); b.classList.add('active'); showToolPanel(tool); if(tool==='appstatus') loadAppStatus(); if(tool==='cleanup') loadCleanup(); if(tool==='preferences') loadPreferences();})); $('rssFeedBtn')?.addEventListener('click',async()=>{await post('/api/rss/feeds',{name:$('rssName').value,url:$('rssUrl').value}); loadRss();}); $('rssRuleBtn')?.addEventListener('click',async()=>{await post('/api/rss/rules',{name:$('rssRuleName').value,pattern:$('rssPattern').value,save_path:$('rssPath').value,label:$('rssLabel').value}); loadRss();}); $('rssCheckBtn')?.addEventListener('click',async()=>{setBusy(true); try{const j=await post('/api/rss/check',{}); toast(`RSS queued ${j.queued} item(s)`,'success');}catch(e){toast(e.message,'danger');} finally{setBusy(false);}}); $('smartSaveBtn')?.addEventListener('click',saveSmartQueue); $('smartCheckBtn')?.addEventListener('click',async()=>{setBusy(true); try{const j=await post('/api/smart-queue/check',{}); const 
r=j.result||{}; toast(`Smart Queue: paused ${r.paused?.length||0}, resumed ${r.resumed?.length||0}`,'success'); await loadSmartQueue();}catch(e){toast(e.message,'danger');}finally{setBusy(false);}}); $('smartManager')?.addEventListener('click',async e=>{const h=e.target.closest('.smart-unexclude')?.dataset.hash; if(!h)return; await post('/api/smart-queue/exclusion',{hash:h,excluded:false}); await loadSmartQueue();}); $('cleanupManager')?.addEventListener('click',async e=>{ if(e.target.closest('#cleanupRefreshBtn')) return loadCleanup(); if(e.target.closest('#cleanupJobsBtn')) return runCleanupAction('/api/cleanup/jobs','Clear finished job logs'); if(e.target.closest('#cleanupSmartQueueBtn')) return runCleanupAction('/api/cleanup/smart-queue','Clear Smart Queue logs'); if(e.target.closest('#cleanupAllBtn')) return runCleanupAction('/api/cleanup/all','Clear job and Smart Queue logs'); }); $('rtConfigReloadBtn')?.addEventListener('click',loadRtConfig); $('rtConfigSaveBtn')?.addEventListener('click',saveRtConfig); $('rtConfigGenerateBtn')?.addEventListener('click',generateRtConfig); $('rtConfigManager')?.addEventListener('input',e=>{ if(e.target.classList.contains('rt-config-input')) updateRtConfigDirty(); }); $('rtConfigManager')?.addEventListener('change',e=>{ if(e.target.classList.contains('rt-config-input')) updateRtConfigDirty(); }); $('rtConfigApplyOnStart')?.addEventListener('change',updateRtConfigDirty); $('peersRefreshSelect')?.addEventListener('change',async e=>{peersRefreshSeconds=Number(e.target.value||0); await post('/api/preferences',{peers_refresh_seconds:peersRefreshSeconds}).catch(()=>{}); setupPeersRefresh(activeTab()); toast('Peers refresh preference saved','success');}); + $('autoConditionType')?.addEventListener('change',updateAutomationForm); $('autoEffectType')?.addEventListener('change',updateAutomationForm); $('automationSaveBtn')?.addEventListener('click',saveAutomation); 
// Automation run-now + per-rule delete, delegated label deletion, bulk-selection
// clear, Smart Queue include/exclude for the current selection, the Smart Queue
// history expand toggle, and the mobile filter <select> (handler continues on the
// next source line).
// NOTE(review): the stray "+" tokens scattered through this file look like git
// diff line markers leaked into this copy (e.g. "+ document.addEventListener"
// would otherwise turn a declaration into an expression) — confirm against the
// original file in version control.
$('automationCheckBtn')?.addEventListener('click',async()=>{setBusy(true);try{const j=await post('/api/automations/check',{}); toast(`Automations applied ${j.result?.applied?.length||0} item(s)`,'success'); await loadAutomations();}catch(e){toast(e.message,'danger');}finally{setBusy(false);}}); $('automationManager')?.addEventListener('click',async e=>{const id=e.target.closest('.automation-delete')?.dataset.id;if(!id)return;if(!confirm('Delete this automation rule?'))return;const r=await fetch('/api/automations/'+id,{method:'DELETE'});const j=await r.json();if(!j.ok)toast(j.error||'Delete failed','danger');await loadAutomations();}); + document.addEventListener('click',async e=>{ const btn=e.target.closest('.delete-label'); if(!btn)return; if(!confirm('Delete this label?')) return; setBusy(true); try{ const r=await fetch('/api/labels/'+btn.dataset.id,{method:'DELETE'}); const j=await r.json(); if(!j.ok) throw new Error(j.error||'Delete failed'); await loadLabels(); toast('Label deleted','success'); }catch(err){toast(err.message,'danger');} finally{setBusy(false);} }); + $('bulkClearBtn')?.addEventListener('click',()=>{selected.clear(); selectedHash=null; lastSelectedHash=null; updateBulkBar(); if($('selectAll')) $('selectAll').checked=false; if($('detailPane')) $('detailPane').innerHTML='Select a torrent.'; setupPeersRefresh('general'); scheduleRender(true);}); + $('smartExcludeSelectedBtn')?.addEventListener('click',()=>setSmartException(selectedHashes(),true,'manual')); + $('smartIncludeSelectedBtn')?.addEventListener('click',()=>setSmartException(selectedHashes(),false,'manual')); + $('smartHistory')?.addEventListener('click',e=>{ const btn=e.target.closest('#smartHistoryToggle'); if(!btn) return; smartHistoryExpanded=!smartHistoryExpanded; loadSmartQueue(); }); + + document.addEventListener('change',e=>{ const sel=e.target.closest('#mobileFilterSelect'); if(!sel)return; activeFilter=sel.value; 
// (continuation of the #mobileFilterSelect change handler opened above) mirror the
// chosen filter onto the desktop filter buttons, reset scroll, and re-render.
document.querySelectorAll('.filter').forEach(x=>x.classList.toggle('active', x.dataset.filter===activeFilter)); if($('tableWrap'))$('tableWrap').scrollTop=0; if($('mobileList'))$('mobileList').scrollTop=0; scheduleRender(true); }); + function awaitMaybeRun(action){ runAction(action).catch?.(()=>{}); } + document.addEventListener('click',e=>{ const ctx=$('ctxMenu'); if(!e.target.closest('#ctxMenu')) ctx.style.display='none'; const mobileFilter=e.target.closest('#mobileFilterBar .mobile-filter'); if(mobileFilter){ document.querySelectorAll('.filter').forEach(x=>x.classList.remove('active')); document.querySelectorAll('.filter').forEach(x=>{ if(x.dataset.filter===mobileFilter.dataset.filter) x.classList.add('active'); }); activeFilter=mobileFilter.dataset.filter; if($('tableWrap'))$('tableWrap').scrollTop=0; if($('mobileList'))$('mobileList').scrollTop=0; scheduleRender(true); return; } const mobileSelectAll=e.target.closest('#mobileSelectAll'); if(mobileSelectAll){ const all=visibleRows.length>0 && visibleRows.every(t=>selected.has(t.hash)); if(all) visibleRows.forEach(t=>selected.delete(t.hash)); else visibleRows.forEach(t=>selected.add(t.hash)); if(selected.size===0){selectedHash=null;lastSelectedHash=null;} else {selectedHash=[...selected][selected.size-1];lastSelectedHash=selectedHash;} scheduleRender(true); return; } const mobileClear=e.target.closest('#mobileClearSelection'); if(mobileClear){ selected.clear(); selectedHash=null; lastSelectedHash=null; scheduleRender(true); return; } const mobileAct=e.target.closest('.mobile-card [data-action]'); if(mobileAct){ const card0=mobileAct.closest('.mobile-card'); selected.clear(); selected.add(card0.dataset.hash); selectedHash=card0.dataset.hash; awaitMaybeRun(mobileAct.dataset.action); scheduleRender(true); return; } const card=e.target.closest('.mobile-card'); const tr=e.target.closest('tr[data-hash]'); const row=tr||card; if(row){ const h=row.dataset.hash; const additive=e.ctrlKey||e.metaKey; if(e.shiftKey){ 
// Row click handling continues: shift-click selects a range, clicking the row
// checkbox toggles that row only, a plain click single-selects (Ctrl/Cmd keeps
// the existing selection) and reloads the details pane.
setSelectionRange(h, additive); } else if(e.target.classList.contains('row-check')){ e.target.checked?selected.add(h):selected.delete(h); lastSelectedHash=h; selectedHash=h; } else { selectedHash=h; if(!additive)selected.clear(); selected.add(h); lastSelectedHash=h; loadDetails(activeTab()); } scheduleRender(true); } const copy=e.target.closest('[data-copy]'); if(copy) copySelected(copy.dataset.copy); const smartEx=e.target.closest('#smartExcludeCtx'); if(smartEx){ selectedHashes().forEach(h=>post('/api/smart-queue/exclusion',{hash:h,excluded:true,reason:'manual'}).catch(()=>{})); toast('Smart Queue exception saved','success'); loadSmartQueue().catch(()=>{}); } const act=e.target.closest('.torrent-action,[data-action]'); if(act&&act.dataset.action&&!act.closest('#detailTabs')&&!act.closest('.mobile-card')) runAction(act.dataset.action); }); + document.addEventListener('contextmenu',e=>{ const tr=e.target.closest('tr[data-hash],.mobile-card'); if(!tr)return; e.preventDefault(); selectedHash=tr.dataset.hash; if(!selected.has(selectedHash)){selected.clear();selected.add(selectedHash);scheduleRender(true);} const m=$('ctxMenu'); m.style.left=`${e.pageX}px`; m.style.top=`${e.pageY}px`; m.style.display='block'; }); + document.querySelectorAll('.torrent-table thead th[data-sort]').forEach(th=>th.addEventListener('click',()=>{ const key=th.dataset.sort; if(sortState.key===key) sortState.dir*=-1; else sortState={key,dir:1}; scheduleRender(true); })); $('tableWrap')?.addEventListener('scroll',()=>scheduleRender(false),{passive:true}); $('selectAll')?.addEventListener('change',e=>{selected.clear(); if(e.target.checked)visibleRows.forEach(t=>selected.add(t.hash)); scheduleRender(true);}); $('searchBox')?.addEventListener('input',()=>{if($('tableWrap'))$('tableWrap').scrollTop=0;scheduleRender(true);}); document.querySelectorAll('.filter').forEach(b=>b.addEventListener('click',()=>{document.querySelectorAll('.filter').forEach(x=>x.classList.remove('active')); 
// Filter buttons / detail tabs / delegated file-priority, peer and tracker
// controls for the details pane. Tracker edit fields are looked up via
// CSS.escape on the data-tracker-index attribute to keep the selector safe.
b.classList.add('active'); activeFilter=b.dataset.filter; if($('tableWrap'))$('tableWrap').scrollTop=0; scheduleRender(true);})); document.querySelectorAll('#detailTabs .nav-link').forEach(b=>b.addEventListener('click',()=>{document.querySelectorAll('#detailTabs .nav-link').forEach(x=>x.classList.remove('active')); b.classList.add('active'); loadDetails(b.dataset.tab);})); document.addEventListener('change',e=>{ const sel=e.target.closest('.file-priority'); if(sel){ setFilePriorities([{index:Number(sel.dataset.index),priority:Number(sel.value)}]); return; } if(e.target && e.target.id==='fileSelectAll'){ document.querySelectorAll('#detailPane .file-check').forEach(cb=>cb.checked=e.target.checked); } }); document.addEventListener('click',e=>{ const bulk=e.target.closest('.file-priority-bulk'); if(!bulk) return; const priority=Number(bulk.dataset.priority); const checked=[...document.querySelectorAll('#detailPane .file-check:checked')].map(cb=>({index:Number(cb.dataset.index),priority})); if(!checked.length) return toast('No files selected','warning'); setFilePriorities(checked); }); document.addEventListener('click',e=>{ const b=e.target.closest('.peer-action'); if(!b) return; peerAction(b.dataset.peerIndex,b.dataset.peerAction); }); document.addEventListener('click',e=>{ const add=e.target.closest('#trackerAddBtn'); if(add){ const url=$('trackerAddUrl')?.value||''; trackerAction('add',{url}); return; } const editStart=e.target.closest('.tracker-edit-start'); if(editStart){ setTrackerEdit(editStart.dataset.index,true); return; } const cancel=e.target.closest('.tracker-edit-cancel'); if(cancel){ setTrackerEdit(cancel.dataset.index,false); return; } const save=e.target.closest('.tracker-edit-save'); if(save){ const input=document.querySelector(`.tracker-url[data-tracker-index="${CSS.escape(String(save.dataset.index))}"]`); trackerAction('edit',{index:Number(save.dataset.index),url:input?.value||''}); return; } const rea=e.target.closest('#trackerReannounceBtn'); 
// Keyboard shortcuts (Ctrl/Cmd+A select all, +I invert, +O add; Esc clear;
// Del remove; Space start; p/s/r/m pause/stop/resume/move) plus the remove and
// add modal wiring. Shortcuts are suppressed while an editable element has
// focus, except Enter inside #labelInput, which applies the label.
if(rea) trackerAction('reannounce',{}); }); $('appStatusRefreshBtn')?.addEventListener('click',loadAppStatus); $('portCheckEnabled')?.addEventListener('change',savePortCheckPref); $('portCheckNowBtn')?.addEventListener('click',()=>loadPortCheck(true)); $('bootstrapThemeSelect')?.addEventListener('change',saveAppearancePreferences); $('fontFamilySelect')?.addEventListener('change',saveAppearancePreferences); + document.addEventListener('keydown',e=>{ const tag=(e.target?.tagName||'').toLowerCase(); const editable=tag==='input'||tag==='textarea'||tag==='select'||e.target?.isContentEditable; if(editable){ if(e.key==='Enter' && e.target?.id==='labelInput'){ e.preventDefault(); $('addLabelToSelectionBtn')?.click(); } return; } if((e.ctrlKey||e.metaKey)&&e.key.toLowerCase()==='a'){e.preventDefault();selected.clear();visibleRows.forEach(t=>selected.add(t.hash));scheduleRender(true);} if((e.ctrlKey||e.metaKey)&&e.key.toLowerCase()==='i'){e.preventDefault();visibleRows.forEach(t=>selected.has(t.hash)?selected.delete(t.hash):selected.add(t.hash));scheduleRender(true);} if((e.ctrlKey||e.metaKey)&&e.key.toLowerCase()==='o'){e.preventDefault();new bootstrap.Modal($('addModal')).show();} if(e.key==='Escape'){selected.clear();scheduleRender(true);} if(e.key==='Delete') new bootstrap.Modal($('removeModal')).show(); if(e.key===' ') {e.preventDefault();runAction('start');} if(e.key.toLowerCase()==='p')runAction('pause'); if(e.key.toLowerCase()==='s')runAction('stop'); if(e.key.toLowerCase()==='r')runAction('resume'); if(e.key.toLowerCase()==='m')runAction('move'); }); + $('removeModal')?.addEventListener('show.bs.modal',()=>{$('removeCount').textContent=selected.size;$('removeData').checked=true;}); $('confirmRemoveBtn')?.addEventListener('click',async()=>{await runAction('remove',{remove_data:$('removeData').checked});bootstrap.Modal.getInstance($('removeModal'))?.hide();}); + $('addModal')?.addEventListener('show.bs.modal',()=>applyDefaultDownloadPath(true)); + 
$('toolsModal')?.addEventListener('show.bs.modal',()=>applyDefaultDownloadPath(false)); + $('addBtn')?.addEventListener('click',async()=>{const btn=$('addBtn');buttonBusy(btn,true);setBusy(true);try{const fd=new FormData();fd.append('uris',$('magnetInput').value);fd.append('directory',$('addPath').value);fd.append('label',$('addLabel').value);fd.append('start',$('addStart').checked?'1':'0');[...($('torrentFiles')?.files||[])].forEach(f=>fd.append('files',f));const j=await (await fetch('/api/torrents/add',{method:'POST',body:fd})).json();if(!j.ok)throw new Error(j.error||'Add failed');$('magnetInput').value='';$('torrentFiles').value='';toast('Add queued','success');bootstrap.Modal.getInstance($('addModal'))?.hide();}catch(e){toast(e.message,'danger');}finally{buttonBusy(btn,false);setBusy(false);}}); $('torrentFiles')?.addEventListener('change',()=>{$('torrentFilesInfo').textContent=$('torrentFiles').files.length?`Selected files: ${$('torrentFiles').files.length}`:'You can select multiple files at once.';}); + const mbpsToKib=mbps=>mbps?Math.round((Number(mbps)*1000000/8)/1024):0; + const kibToMbps=kib=>kib?Math.round((Number(kib)*1024*8)/1000000):0; + function setLimitSliderMax(slider,mbps){ if(slider && mbps>Number(slider.max||0)) slider.max=String(mbps); } + function setLimitValue(targetId,kib){ const input=$(targetId); if(input) input.value=Math.max(0,Math.round(Number(kib)||0)); } + function updateLimitSlider(slider){ if(!slider) return; const input=$(slider.dataset.target); const out=$(slider.dataset.output); const mbps=kibToMbps(Number(input?.value||0)); setLimitSliderMax(slider,mbps); slider.value=String(mbps); if(out) out.textContent=mbps?`${mbps} Mbit/s`:'Unlimited'; } + function updateLimitSliders(){ document.querySelectorAll('.limit-slider').forEach(updateLimitSlider); } + function syncLimitInputFromSlider(slider){ const mbps=Number(slider.value||0); setLimitValue(slider.dataset.target,mbpsToKib(mbps)); updateLimitSlider(slider); } + 
// Speed-limit preset buttons and slider/input wiring, the speed modal
// (pre-filled from lastLimits, values sent in bytes/s), then the SCGI profile
// list renderer and the profile add/edit form helpers.
// NOTE(review): the per-profile list-item template at the end of
// refreshProfiles appears to have lost its HTML tags in this copy (only text
// and interpolations remain, yet the click handler below expects
// data-edit-profile / data-del-profile / data-use-profile buttons) — confirm
// the markup against the original file in version control.
document.querySelectorAll('.limit-preset').forEach(b=>b.addEventListener('click',()=>{const kib=mbpsToKib(Number(b.dataset.mbps||0));setLimitValue('limitDown',kib);setLimitValue('limitUp',kib);updateLimitSliders();})); + document.querySelectorAll('.limit-slider').forEach(slider=>slider.addEventListener('input',()=>syncLimitInputFromSlider(slider))); + ['limitDown','limitUp'].forEach(id=>$(id)?.addEventListener('input',updateLimitSliders)); + $('saveSpeedBtn')?.addEventListener('click',async()=>{const btn=$('saveSpeedBtn');buttonBusy(btn,true);setBusy(true);try{await post('/api/speed/limits',{down:Math.round(Number($('limitDown').value||0)*1024),up:Math.round(Number($('limitUp').value||0)*1024)});toast('Speed limits queued','success');bootstrap.Modal.getInstance($('speedModal'))?.hide();}catch(e){toast(e.message,'danger');}finally{buttonBusy(btn,false);setBusy(false);}}); $('speedModal')?.addEventListener('show.bs.modal',()=>{setLimitValue('limitDown',lastLimits.down?Math.round(lastLimits.down/1024):0);setLimitValue('limitUp',lastLimits.up?Math.round(lastLimits.up/1024):0);updateLimitSliders();}); + async function refreshProfiles(){ $('profileList').innerHTML='Loading profiles...'; const j=await (await fetch('/api/profiles')).json(); const active=j.active?.id; profileCache=new Map((j.profiles||[]).map(p=>[String(p.id),p])); $('profileList').innerHTML=(j.profiles||[]).map(p=>`
${esc(p.name)} ${p.id===active?"active":''} ${p.is_remote?"remote":''}${esc(p.scgi_url)} · jobs ${esc(p.max_parallel_jobs||5)}${p.is_remote?' · remote CPU/RAM/IP':''}
`).join('')||'No profiles.'; } + function resetProfileForm(){ if($('profileId')) $('profileId').value=''; if($('profileName')) $('profileName').value=''; if($('profileUrl')) $('profileUrl').value=''; if($('profileTimeout')) $('profileTimeout').value='5'; if($('profileParallel')) $('profileParallel').value='5'; if($('profileRemote')) $('profileRemote').checked=false; if($('profileFormTitle')) $('profileFormTitle').textContent='Add one rTorrent'; if($('saveProfileBtn')) $('saveProfileBtn').innerHTML=' Add profile'; $('cancelProfileEditBtn')?.classList.add('d-none'); } + function editProfileForm(profile){ if(!profile) return; if($('profileId')) $('profileId').value=profile.id; if($('profileName')) $('profileName').value=profile.name||''; if($('profileUrl')) $('profileUrl').value=profile.scgi_url||''; if($('profileTimeout')) $('profileTimeout').value=profile.timeout_seconds||5; if($('profileParallel')) $('profileParallel').value=profile.max_parallel_jobs||5; if($('profileRemote')) $('profileRemote').checked=!!profile.is_remote; if($('profileFormTitle')) $('profileFormTitle').textContent='Edit rTorrent'; if($('saveProfileBtn')) $('saveProfileBtn').innerHTML=' Save profile'; $('cancelProfileEditBtn')?.classList.remove('d-none'); $('profileName')?.focus(); } + $('profileModal')?.addEventListener('show.bs.modal',refreshProfiles); $('profileList')?.addEventListener('click',async e=>{const btn=e.target.closest('[data-del-profile],[data-use-profile],[data-edit-profile]'); const del=btn?.dataset.delProfile,use=btn?.dataset.useProfile,edit=btn?.dataset.editProfile;if(edit){editProfileForm(profileCache.get(String(edit)));return;} if(del){setBusy(true);await fetch(`/api/profiles/${del}`,{method:'DELETE'});setBusy(false);refreshProfiles();location.reload();} if(use){setBusy(true);await post(`/api/profiles/${use}/activate`,{});setBusy(false);location.reload();}}); $('cancelProfileEditBtn')?.addEventListener('click',resetProfileForm); 
$('saveProfileBtn')?.addEventListener('click',async()=>{setBusy(true);const id=$('profileId')?.value;const payload={name:$('profileName').value,scgi_url:$('profileUrl').value,timeout_seconds:$('profileTimeout').value,max_parallel_jobs:$('profileParallel').value,is_remote:$('profileRemote')?.checked};const j=await post(id?`/api/profiles/${id}`:'/api/profiles',payload,id?'PUT':'POST').catch(e=>toast(e.message,'danger'));setBusy(false);if(j?.profile)location.reload();}); $('saveBulkProfilesBtn')?.addEventListener('click',async()=>{const lines=($('bulkProfiles').value||'').split(/\n+/).map(x=>x.trim()).filter(Boolean);setBusy(true);try{for(const line of lines){const [name,scgi_url]=line.split('|').map(x=>x.trim());if(name&&scgi_url)await post('/api/profiles',{name,scgi_url,timeout_seconds:$('profileTimeout').value,max_parallel_jobs:$('profileParallel').value,is_remote:$('profileRemote')?.checked});}location.reload();}catch(e){toast(e.message,'danger');}finally{setBusy(false);}}); $('profileSelect')?.addEventListener('change',async e=>{await post(`/api/profiles/${e.target.value}/activate`,{});const opt=e.target.selectedOptions?.[0];if($('activeProfileName') && opt) $('activeProfileName').textContent=opt.textContent || 'rTorrent';bootstrap.Modal.getInstance($('profilePickerModal'))?.hide();defaultDownloadPath=null;applyDefaultDownloadPath(true).catch(()=>{});socket.emit('select_profile',{profile_id:e.target.value});hasTorrentSnapshot=false;torrents.clear();selected.clear();scheduleRender(true);}); $('themeToggle')?.addEventListener('click',async()=>{const cur=document.documentElement.dataset.bsTheme==='dark'?'light':'dark';document.documentElement.dataset.bsTheme=cur;await post('/api/preferences',{theme:cur}).catch(()=>{});}); $('mobileToggle')?.addEventListener('click',()=>{document.body.classList.toggle('mobile-mode-manual');syncMobileMode();}); window.addEventListener('resize',()=>syncMobileMode(),{passive:true}); syncMobileMode(); + function drawTraffic(down,up){ 
traffic.push({down:Number(down||0),up:Number(up||0)}); if(traffic.length>60)traffic.shift(); const c=$('trafficChart'); if(!c)return; const ctx=c.getContext('2d'),w=c.width,h=c.height; ctx.clearRect(0,0,w,h); const max=Math.max(1,...traffic.map(p=>Math.max(p.down,p.up))); ctx.beginPath(); traffic.forEach((p,i)=>{const x=i*(w/59),y=h-(p.down/max)*h; i?ctx.lineTo(x,y):ctx.moveTo(x,y);}); ctx.strokeStyle='#38bdf8'; ctx.stroke(); ctx.beginPath(); traffic.forEach((p,i)=>{const x=i*(w/59),y=h-(p.up/max)*h; i?ctx.lineTo(x,y):ctx.moveTo(x,y);}); ctx.strokeStyle='#f59e0b'; ctx.stroke(); } + function drawSystemUsage(cpu,ram){ + const c=$('systemChart'); if(!c) return; + const cpuVal=Math.max(0,Math.min(100,Number(cpu||0))); + const ramVal=Math.max(0,Math.min(100,Number(ram||0))); + systemUsage.push({cpu:cpuVal,ram:ramVal}); if(systemUsage.length>60) systemUsage.shift(); + const ctx=c.getContext('2d'), w=c.width, h=c.height; ctx.clearRect(0,0,w,h); + ctx.fillStyle='rgba(148,163,184,.18)'; ctx.fillRect(0,0,w,h); + ctx.beginPath(); systemUsage.forEach((p,i)=>{const x=i*(w/Math.max(1,systemUsage.length-1)), y=h-(p.cpu/100)*h; i?ctx.lineTo(x,y):ctx.moveTo(x,y);}); ctx.strokeStyle='#a78bfa'; ctx.stroke(); + ctx.beginPath(); systemUsage.forEach((p,i)=>{const x=i*(w/Math.max(1,systemUsage.length-1)), y=h-(p.ram/100)*h; i?ctx.lineTo(x,y):ctx.moveTo(x,y);}); ctx.strokeStyle='#22c55e'; ctx.stroke(); + c.title=`CPU ${cpuVal.toFixed(1)}% / RAM ${ramVal.toFixed(1)}%`; + } + function drawDiskUsage(disk){ + const box=$('diskStatus'), label=$('statDisk'), c=$('diskChart'); + if(!box||!label||!c)return; + const ctx=c.getContext('2d'), w=c.width, h=c.height; + ctx.clearRect(0,0,w,h); + const ok=disk&&disk.ok; + const pct=ok?Math.max(0,Math.min(100,Number(disk.percent||0))):0; + label.textContent=ok?`${pct.toFixed(pct%1?1:0)}%`:'-'; + box.classList.toggle('disk-warn', !ok || pct>=90); + box.title=ok?`Disk ${disk.path||'default path'} +Used: ${disk.used_h||'-'} / ${disk.total_h||'-'} +Free: 
${disk.free_h||'-'}${disk.fallback?` +Measured on: ${disk.source_path}`:''}`:`Disk usage unavailable${disk?.error?` +${disk.error}`:''}`; + ctx.fillStyle='rgba(148,163,184,.22)'; ctx.fillRect(0,5,w,14); + ctx.fillStyle=pct>=90?'#ef4444':pct>=75?'#f59e0b':'#22c55e'; ctx.fillRect(0,5,Math.round(w*pct/100),14); + ctx.strokeStyle='rgba(148,163,184,.55)'; ctx.strokeRect(.5,5.5,w-1,13); + } + /* Fetch /api/traffic/history for the given range and render it via drawTrafficHistory; on failure show the error text in the info line and wipe both chart canvases. */ async function loadTrafficHistory(range="7d"){ + const info=$('trafficHistoryInfo'); + const volume=$('trafficHistoryChart'); + const speed=$('trafficSpeedChart'); + if(info) info.textContent='Loading...'; + try{ + const res=await fetch(`/api/traffic/history?range=${encodeURIComponent(range)}`); + const j=await res.json(); + if(!j.ok) throw new Error(j.error||'Failed to load history'); + drawTrafficHistory(j.history||{rows:[]}); + if(info){ + const rows=(j.history&&j.history.rows)||[]; + const bucket=(j.history&&j.history.bucket)||'bucket'; + info.textContent=rows.length ? `${rows.length} ${bucket} bucket(s), retention ${j.history?.retention_days||90} days.` : 'No retained samples yet. 
Data is stored every minute while pyTorrent is running.'; + } + }catch(e){ + if(info) info.textContent=e.message; + [volume,speed].forEach(c=>{ if(c) c.getContext('2d').clearRect(0,0,c.width,c.height); }); + } + } + /* Hi-DPI canvas setup: scale the backing store by devicePixelRatio and return {ctx,w,h} in CSS pixels (min 320x320; fallbacks 900/420 when the element has no measured size). */ function setupCanvas(canvas){ + const rect=canvas.getBoundingClientRect(); + const dpr=window.devicePixelRatio||1; + const cssW=Math.max(320, Math.floor(rect.width || canvas.parentElement?.clientWidth || 900)); + const cssH=Math.max(320, Math.floor(rect.height || 420)); + canvas.width=Math.floor(cssW*dpr); canvas.height=Math.floor(cssH*dpr); + const ctx=canvas.getContext('2d'); ctx.setTransform(dpr,0,0,dpr,0,0); + return {ctx,w:cssW,h:cssH}; + } + /* Axis frame shared by both charts: left gutter 42px, bottom gutter 28px. */ function drawAxes(ctx,w,h){ ctx.strokeStyle='rgba(148,163,184,.35)'; ctx.lineWidth=1; ctx.beginPath(); ctx.moveTo(42,12); ctx.lineTo(42,h-28); ctx.lineTo(w-12,h-28); ctx.stroke(); } + /* NOTE(review): this capture is garbled here — the tail of fmtBytes (the `i<...` loop bound and its return statement) and the header of the drawLine(ctx,pts,w,h,color) helper appear to have been stripped (everything from a '<' up to the next '>' is missing). Verify against the original file before editing. */ function fmtBytes(v){ v=Number(v||0); const u=['B','KiB','MiB','GiB','TiB']; let i=0; while(v>=1024&&i{ const x=42+(i*Math.max(1,(w-58)/Math.max(1,pts.length-1))); const y=h-28-p; i?ctx.lineTo(x,y):ctx.moveTo(x,y); }); ctx.stroke(); } + /* Render the per-bucket volume bar chart and the average-speed line chart; each row supplies bucket, downloaded/uploaded and avg_down_rate/avg_up_rate. Download is drawn in #38bdf8, upload in #f59e0b. */ function drawTrafficHistory(hist){ + const rows=hist.rows||[]; + const volume=$('trafficHistoryChart'), speed=$('trafficSpeedChart'); + if(!volume||!speed) return; + const bodyColor=getComputedStyle(document.body).color; + const muted='rgba(148,163,184,.75)'; + + /* Shared legend strip: title plus colored swatches for Download/Upload. */ function legend(ctx, x, y, unit){ + ctx.fillStyle=bodyColor; ctx.font='12px system-ui'; ctx.fillText(`Download / Upload (${unit})`, x, y); + ctx.fillStyle='#38bdf8'; ctx.fillRect(x, y+7, 10, 10); ctx.fillStyle=bodyColor; ctx.fillText('Download', x+14, y+17); + ctx.fillStyle='#f59e0b'; ctx.fillRect(x+92, y+7, 10, 10); ctx.fillStyle=bodyColor; ctx.fillText('Upload', x+106, y+17); + } + /* Y axis: max, half-max and zero, formatted with fmtBytes plus a unit suffix. */ function yLabels(ctx, max, suffix, w, h){ + ctx.fillStyle=muted; ctx.font='11px system-ui'; + ctx.fillText(fmtBytes(max)+suffix, 6, 18); + ctx.fillText(fmtBytes(max/2)+suffix, 6, Math.round((h-28+12)/2)); + ctx.fillText('0 '+suffix.trim(), 24, h-12); + } + 
function xLabels(ctx, values, w, h){ + if(!values.length) return; + ctx.fillStyle=muted; ctx.font='11px system-ui'; + const first=String(values[0]||''), last=String(values[values.length-1]||''); + ctx.fillText(first.slice(-10), 44, h-8); + const tw=ctx.measureText(last.slice(-10)).width; + ctx.fillText(last.slice(-10), Math.max(48, w-12-tw), h-8); + } + + let c=setupCanvas(volume), ctx=c.ctx,w=c.w,h=c.h; ctx.clearRect(0,0,w,h); drawAxes(ctx,w,h); + if(!rows.length){ + ctx.fillStyle=bodyColor; ctx.fillText('No history yet. Samples appear after pyTorrent records traffic.',52,36); + const sc=setupCanvas(speed); sc.ctx.clearRect(0,0,sc.w,sc.h); sc.ctx.fillStyle=bodyColor; sc.ctx.fillText('No speed samples yet.',52,36); + return; + } + const labels=rows.map(r=>r.bucket); + const maxVol=Math.max(1,...rows.map(r=>Math.max(Number(r.downloaded||0),Number(r.uploaded||0)))); + const usable=w-58, bw=Math.max(2, Math.min(26, usable/rows.length-3)); + rows.forEach((r,i)=>{ + const x=44+i*(usable/rows.length); const dh=(Number(r.downloaded||0)/maxVol)*(h-60); const uh=(Number(r.uploaded||0)/maxVol)*(h-60); + ctx.fillStyle='#38bdf8'; ctx.fillRect(x,h-28-dh,bw/2,dh); + ctx.fillStyle='#f59e0b'; ctx.fillRect(x+bw/2,h-28-uh,bw/2,uh); + }); + legend(ctx,52,16,'data'); yLabels(ctx,maxVol,'',w,h); xLabels(ctx,labels,w,h); + + c=setupCanvas(speed); ctx=c.ctx; w=c.w; h=c.h; ctx.clearRect(0,0,w,h); drawAxes(ctx,w,h); + const maxSpeed=Math.max(1,...rows.map(r=>Math.max(Number(r.avg_down_rate||0),Number(r.avg_up_rate||0)))); + const scale=h-60; const dl=rows.map(r=>Number(r.avg_down_rate||0)/maxSpeed*scale); const ul=rows.map(r=>Number(r.avg_up_rate||0)/maxSpeed*scale); + drawLine(ctx,dl,w,h,'#38bdf8'); drawLine(ctx,ul,w,h,'#f59e0b'); + legend(ctx,52,16,'B/s'); yLabels(ctx,maxSpeed,'/s',w,h); xLabels(ctx,labels,w,h); + } + /* Wiring: reload with the default 7d range every time the traffic modal opens; range buttons swap the active button styling and reload. */ $('trafficModal')?.addEventListener("show.bs.modal",()=>loadTrafficHistory("7d")); + document.querySelectorAll(".traffic-range").forEach(b=>b.addEventListener("click",()=>{ 
+ document.querySelectorAll(".traffic-range").forEach(x=>{x.classList.remove("btn-primary");x.classList.add("btn-outline-secondary");}); + b.classList.add("btn-primary"); b.classList.remove("btn-outline-secondary"); + loadTrafficHistory(b.dataset.range||"7d"); + })); + /* Socket.IO lifecycle: the connection badge and the initial loader mirror the transport state, and the active profile is re-selected on every (re)connect. */ socket.on('connect',()=>{ $('connBadge').className='badge text-bg-success'; $('connBadge').textContent='online'; setInitialLoader('Loading torrents...','Connection is ready. Waiting for the first torrent snapshot.'); socket.emit('select_profile',{profile_id:window.PYTORRENT.activeProfile}); }); socket.on('disconnect',()=>{ $('connBadge').className='badge text-bg-danger'; $('connBadge').textContent='offline'; setInitialLoader('Waiting for connection...','pyTorrent is not connected yet. The application will open after data is received.'); }); socket.io.on('reconnect_attempt',()=>{ $('connBadge').className='badge text-bg-warning'; $('connBadge').textContent='reconnecting'; setInitialLoader('Reconnecting...','Trying to restore the live connection and load torrent data.'); }); socket.io.on('reconnect',()=>{ $('connBadge').className='badge text-bg-success'; $('connBadge').textContent='online'; setInitialLoader('Loading torrents...','Connection restored. 
Waiting for the first torrent snapshot.'); socket.emit('select_profile',{profile_id:window.PYTORRENT.activeProfile}); }); /* Data flow: a snapshot rebuilds the local torrent map and hides the initial loader, patches mutate it in place; job/operation events drive the busy flag, per-torrent operation markers and toasts. */ socket.on('torrent_snapshot',msg=>{hasTorrentSnapshot=true;torrentSummary=msg.summary||null;torrents.clear();(msg.torrents||[]).forEach(t=>torrents.set(t.hash,t));scheduleRender(true);hideInitialLoader();}); socket.on('torrent_patch',patchRows); socket.on('job_update',()=>{ if(document.body.classList.contains('modal-open')) loadJobs().catch(()=>{}); }); socket.on('operation_started',msg=>{setBusy(true);markTorrentOperation(msg.hashes||[],msg.action,msg.job_id,'running');toast(`${msg.action} started`,'secondary');}); socket.on('operation_finished',msg=>{setBusy(false);clearJobOperation(msg.job_id,msg.hashes||[]);toast(`${msg.action} done`,'success');}); socket.on('operation_failed',msg=>{setBusy(false);clearJobOperation(msg.job_id,msg.hashes||[]);toast(`${msg.action}: ${msg.error}`,'danger');}); socket.on('rtorrent_error',msg=>{ if(msg.error){$('connBadge').className='badge badge-degraded';$('connBadge').textContent='degraded'; setInitialLoader('Waiting for rTorrent...','rTorrent is not ready yet. Data will appear automatically after it responds.');} }); socket.on('heartbeat',msg=>{ if(msg.error){$('connBadge').className='badge badge-degraded';$('connBadge').textContent='degraded'; setInitialLoader('Waiting for rTorrent...','rTorrent is not ready yet. 
Data will appear automatically after it responds.');} else if(socket.connected){$('connBadge').className='badge text-bg-success';$('connBadge').textContent='online';} }); socket.on('smart_queue_update',msg=>{ if(msg && msg.enabled) toast(`Smart Queue: paused ${msg.paused?.length||0}, resumed ${msg.resumed?.length||0}`,'secondary'); }); socket.on('automation_update',msg=>{ if(msg?.applied?.length) toast(`Automations applied ${msg.applied.length} item(s)`,'secondary'); }); socket.on('rtorrent_config_applied',msg=>{ if(msg?.result?.updated?.length) toast(`Startup rTorrent config applied (${msg.result.updated.length})`,'success'); if(msg?.error) toast(`Startup rTorrent config: ${msg.error}`,'danger'); }); /* Status bar refresh: CPU/RAM widgets are hidden when usage is unavailable (usage_available===false or fields missing); rates, limits, totals and the mini traffic/disk charts are always updated. */ socket.on('system_stats',s=>{ const usageAvailable=s.usage_available!==false && s.cpu!==undefined && s.ram!==undefined; $('statCpuBox')?.classList.toggle('d-none',!usageAvailable);$('statRamBox')?.classList.toggle('d-none',!usageAvailable);$('systemChart')?.classList.toggle('d-none',!usageAvailable); if(usageAvailable){$('statCpu').textContent=s.cpu??'-';$('statRam').textContent=s.ram??'-';drawSystemUsage(s.cpu,s.ram);} $('statVersion').textContent=s.version||'-';$('statDl').textContent=s.down_rate_h||'0 B/s';$('statUl').textContent=s.up_rate_h||'0 B/s';if($('mobileSpeedDl')) $('mobileSpeedDl').textContent=s.down_rate_h||'0 B/s';if($('mobileSpeedUl')) $('mobileSpeedUl').textContent=s.up_rate_h||'0 B/s';lastLimits={down:Number(s.down_limit||0),up:Number(s.up_limit||0)};$('statDlLimit').textContent=s.down_limit_h||'∞';$('statUlLimit').textContent=s.up_limit_h||'∞';$('statTotalDl').textContent=compactTransferText(s.total_down_h);$('statTotalUl').textContent=compactTransferText(s.total_up_h);drawTraffic(s.down_rate,s.up_rate);drawDiskUsage(s.disk);}); + /* One-time UI bootstrap after all socket handlers are registered. */ updateSortHeaders(); applyColumnVisibility(); renderColumnManager(); scheduleRender(true); loadLabels().catch(()=>{}); loadRatios().catch(()=>{}); loadSmartQueue().catch(()=>{}); loadAutomations().catch(()=>{}); 
if(portCheckEnabled) loadPortCheck(false); else renderPortCheck({status:'disabled',enabled:false}); applyDefaultDownloadPath(false).catch(()=>{}); +})(); diff --git a/pytorrent/static/styles.css b/pytorrent/static/styles.css new file mode 100644 index 0000000..f6233b8 --- /dev/null +++ b/pytorrent/static/styles.css @@ -0,0 +1,808 @@ /* Theme tokens: layout dimensions and the app font stack exposed as CSS custom properties; the [data-bs-theme="dark"] block overrides Bootstrap's palette variables, and the html[data-app-font=...] selectors implement the user font preference. */ +:root { + --app-font-family: Inter, system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif; + --topbar: 50px; + --statusbar: 34px; + --sidebar: 270px; + --torrent-progress-complete: #198754; +} +[data-bs-theme="dark"] { + --bs-body-bg: #05070a; + --bs-body-bg-rgb: 5,7,10; + --bs-body-color: #d6dde8; + --bs-secondary-bg: #0a0f16; + --bs-secondary-bg-rgb: 10,15,22; + --bs-tertiary-bg: #0e141d; + --bs-border-color: #1d2734; + --bs-secondary-color: #8d98aa; + --bs-primary-bg-subtle: #0d2238; + --bs-primary-text-emphasis: #9ecbff; + --torrent-progress-complete: #2f9e75; +} + +html[data-app-font="adwaita-mono"] { --app-font-family: "Adwaita Mono", ui-monospace, SFMono-Regular, Menlo, Consolas, "Liberation Mono", monospace; } +html[data-app-font="inter"] { --app-font-family: Inter, system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif; } +html[data-app-font="system-ui"] { --app-font-family: system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif; } +html[data-app-font="source-sans-3"] { --app-font-family: "Source Sans 3", "Source Sans Pro", system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif; } +html[data-app-font="jetbrains-mono"] { --app-font-family: "JetBrains Mono", ui-monospace, SFMono-Regular, Menlo, Consolas, "Liberation Mono", monospace; } +html, body { height: 100%; } +body { + overflow: hidden; + font-size: 13px; + padding: 8px; + background: #05070a; + font-family: var(--app-font-family); +} /* App shell: three-row grid (topbar / content / statusbar) filling the viewport. */ +.app-shell { + height: calc(100vh - 16px); + display: grid; + grid-template-rows: var(--topbar) 1fr var(--statusbar); + background: var(--bs-body-bg); + border: 1px solid var(--bs-border-color); + border-radius: 
12px; + overflow: hidden; + box-shadow: 0 12px 45px rgba(0,0,0,.38); +} +.topbar { + display: flex; + align-items: center; + justify-content: space-between; + gap: .75rem; + padding: .42rem .7rem; + min-height: var(--topbar); + background: var(--bs-secondary-bg); +} +.toolbar-left, .toolbar-right { display: flex; align-items: center; gap: .45rem; min-width: 0; } +.toolbar-left { flex: 0 1 auto; overflow: hidden; } +.toolbar-right { flex: 1 1 0; justify-content: flex-end; margin-left: auto; } +.brand { font-weight: 800; font-size: 1.05rem; letter-spacing: .2px; white-space: nowrap; line-height: 32px; } +.profile-picker-btn { max-width: 180px; } +.profile-picker-btn span { overflow: hidden; text-overflow: ellipsis; white-space: nowrap; } +.profile-select { width: 100%; } +.search { width: min(38vw, 420px); min-width: clamp(160px, 20vw, 220px); max-width: 420px; flex: 0 1 420px; } +.mobile-speed-stats { display: none; align-items: center; gap: .45rem; flex: 0 0 auto; color: var(--bs-secondary-color); font-size: .72rem; white-space: nowrap; } +.mobile-speed-stats b { color: var(--bs-body-color); font-weight: 700; } +.topbar .form-control, .topbar .form-select { height: 32px; line-height: 1.15; } +.topbar .btn { min-height: 28px; line-height: 1; } +#themeToggle, #mobileToggle { width: 32px; min-width: 32px; display: inline-flex; align-items: center; justify-content: center; } +.spinner-border-xs { width: .75rem; height: .75rem; border-width: .12em; vertical-align: -1px; } /* Floating pill shown while a background request is in flight. */ +.global-loader { + position: fixed; + right: 14px; + bottom: 44px; + z-index: 7000; + display: inline-flex; + align-items: center; + gap: .4rem; + padding: .4rem .65rem; + border-radius: 999px; + background: var(--bs-tertiary-bg); + color: var(--bs-body-color); + border: 1px solid var(--bs-border-color); + box-shadow: 0 8px 28px rgba(0,0,0,.35); +} + /* Full-screen splash shown until the first torrent snapshot arrives. */ +.initial-loader { + position: fixed; + inset: 0; + z-index: 9000; + display: grid; + place-items: center; + padding: 1rem; + background: 
radial-gradient(circle at 50% 35%, rgba(var(--bs-secondary-bg-rgb), .98), var(--bs-body-bg) 68%); + color: var(--bs-body-color); + transition: opacity .22s ease, visibility .22s ease; +} +.initial-loader.is-hidden { + opacity: 0; + visibility: hidden; + pointer-events: none; +} +.initial-loader-card { + width: min(92vw, 430px); + padding: 2rem; + border: 1px solid var(--bs-border-color); + border-radius: 18px; + background: rgba(var(--bs-secondary-bg-rgb), .88); + box-shadow: 0 24px 70px rgba(0,0,0,.48); + text-align: center; +} +.initial-loader-brand { + font-size: 1.35rem; + font-weight: 800; + letter-spacing: .2px; +} +.initial-loader-spinner { + margin: 1.4rem 0 1rem; +} +.initial-loader-title { + font-size: 1rem; + font-weight: 700; +} +.initial-loader-text { + margin-top: .35rem; + color: var(--bs-secondary-color); +} + +.main-grid { min-height: 0; display: grid; grid-template-columns: var(--sidebar) 1fr; } +.sidebar { padding: .65rem; overflow: auto; background: rgba(var(--bs-secondary-bg-rgb), .9); } +/* Note: Sidebar filters are wider and use one structured block per class to avoid duplicate overrides. 
*/ +.filter { + width: 100%; + display: grid; + grid-template-columns: minmax(0, 1fr) auto; + gap: .15rem .55rem; + align-items: center; + margin-bottom: .2rem; + padding: .45rem .6rem; + border: 0; + border-radius: .55rem; + background: transparent; + color: var(--bs-body-color); + text-align: left; +} +.filter:hover, +.filter.active { + background: var(--bs-primary-bg-subtle); + color: var(--bs-primary-text-emphasis); +} +.filter > span:first-child { + min-width: 0; + font-weight: 600; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} +.filter > span:last-child { + min-width: 0; + max-width: 12rem; + text-align: right; +} +.filter-count { + display: block; + font-weight: 700; + line-height: 1.1; +} +.filter-meta { + display: block; + margin-top: .05rem; + color: var(--bs-secondary-color); + font-size: .68rem; + font-weight: 400; + line-height: 1.15; + opacity: .72; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} +.filter.active .filter-meta, +.filter:hover .filter-meta { + color: var(--bs-primary-text-emphasis); + opacity: .78; +} +.shortcut { font-size: .78rem; color: var(--bs-secondary-color); padding: .15rem .5rem; } /* Content column: torrent list above a fixed-height details pane. */ +.content { min-width: 0; min-height: 0; display: grid; grid-template-rows: 1fr 255px; } +.table-wrap { overflow: auto; contain: content; } +.torrent-table { margin: 0; white-space: nowrap; table-layout: auto; } +.torrent-table thead th { position: sticky; top: 0; z-index: 2; background: var(--bs-tertiary-bg); border-bottom: 1px solid var(--bs-border-color); user-select: none; } +.torrent-table thead th[data-sort] { cursor: pointer; } +.torrent-table thead th[data-sort]:hover, .torrent-table thead th.sorted { color: var(--bs-primary-text-emphasis); } +.sort-icon { opacity: .85; } +.torrent-table tbody tr { cursor: default; height: 36px; } +.torrent-table tbody tr.selected td { background: var(--bs-primary-bg-subtle); } +.torrent-table .sel { width: 34px; text-align: center; } +.torrent-table .name { 
min-width: 280px; max-width: 520px; overflow: hidden; text-overflow: ellipsis; } +.torrent-table .path { max-width: 360px; overflow: hidden; text-overflow: ellipsis; color: var(--bs-secondary-color); } +.virtual-spacer td { padding: 0 !important; border: 0 !important; } +.empty { height: 120px; text-align: center; vertical-align: middle; color: var(--bs-secondary-color); } +.progress.thin { height: 7px; min-width: 130px; margin-bottom: 1px; background: rgba(255,255,255,.08); } +.details { min-height: 0; overflow: hidden; background: rgba(var(--bs-secondary-bg-rgb), .78); } +.detail-pane { height: 210px; overflow: auto; padding: .65rem; } +.loading-line { display: flex; align-items: center; gap: .5rem; color: var(--bs-secondary-color); padding: .75rem; } +.muted-pane { color: var(--bs-secondary-color); } +.detail-table { white-space: nowrap; } +.general-grid { display: grid; grid-template-columns: repeat(3, minmax(0, 1fr)); gap: .6rem; } +.general-grid div { border: 1px solid var(--bs-border-color); border-radius: .6rem; padding: .5rem; background: var(--bs-body-bg); min-width: 0; } +.general-grid b { display: block; color: var(--bs-secondary-color); font-size: .72rem; text-transform: uppercase; } +.general-grid span { overflow-wrap: anywhere; } /* Bottom status bar and its clickable speed-limit chips. */ +.statusbar { display: flex; align-items: center; gap: 1rem; padding: 0 .75rem; overflow-x: auto; background: var(--bs-tertiary-bg); color: var(--bs-secondary-color); white-space: nowrap; } +.statusbar b { color: var(--bs-body-color); } +.status-limit { border: 1px solid var(--bs-border-color); background: rgba(var(--bs-secondary-bg-rgb), .9); color: var(--bs-secondary-color); border-radius: .45rem; padding: .12rem .5rem; white-space: nowrap; } +.status-limit:hover { color: var(--bs-body-color); background: var(--bs-secondary-bg); } /* Right-click context menu for torrent rows. */ +.ctx-menu { display: none; position: absolute; z-index: 5000; min-width: 200px; padding: .35rem; border: 1px solid var(--bs-border-color); border-radius: .6rem; background: var(--bs-body-bg); } 
+.ctx-menu button { display: block; width: 100%; text-align: left; border: 0; background: transparent; color: var(--bs-body-color); padding: .42rem .55rem; border-radius: .4rem; } +.ctx-menu button:hover { background: var(--bs-secondary-bg); } +.ctx-menu .danger { color: var(--bs-danger); } +.ctx-menu hr { margin: .25rem 0; border-color: var(--bs-border-color); } /* SCGI profile manager rows; .active marks the currently selected rTorrent profile. */ +.profile-row { display: grid; grid-template-columns: 1fr auto; gap: .25rem .5rem; align-items: center; padding: .45rem; border: 1px solid var(--bs-border-color); border-radius: .6rem; margin-bottom: .45rem; background: rgba(var(--bs-secondary-bg-rgb), .58); } +.profile-row span { grid-column: 1 / 2; color: var(--bs-secondary-color); overflow-wrap: anywhere; } +.profile-form-actions { display: inline-flex; gap: .35rem; flex-wrap: wrap; } +.profile-actions { display: inline-flex; gap: .35rem; } +.profile-row.active { border-color: var(--bs-primary); background: var(--bs-primary-bg-subtle); } +.flag-icon { border-radius: 2px; box-shadow: 0 0 0 1px rgba(255,255,255,.12); } +.flag-code { color: var(--bs-secondary-color); margin-left: .25rem; } +.peer-actions { display: flex; align-items: center; gap: .25rem; flex-wrap: nowrap; } +.peer-actions .btn { display: inline-flex; align-items: center; gap: .25rem; border-radius: .35rem !important; } +.modal-content { background: var(--bs-body-bg); border: 1px solid var(--bs-border-color); border-radius: 14px; } +.modal-header, .modal-footer { background: rgba(var(--bs-secondary-bg-rgb), .82); border-color: var(--bs-border-color); } +.add-grid { display: grid; gap: .85rem; } +.magnet-box { min-height: 64px; resize: vertical; } +.upload-box, .surface-section { border: 1px solid var(--bs-border-color); background: rgba(var(--bs-secondary-bg-rgb), .5); border-radius: .75rem; padding: .75rem; } +.section-title { font-weight: 700; margin-bottom: .55rem; color: var(--bs-body-color); } +.preset-grid { display: grid; grid-template-columns: repeat(3, minmax(0, 1fr)); gap: .4rem; 
} +.toast-host { position: fixed; right: 14px; top: 70px; z-index: 8000; display: grid; gap: .4rem; } +.toast-item { padding: .45rem .65rem; border-radius: .55rem; box-shadow: 0 8px 25px rgba(0,0,0,.28); max-width: 360px; } /* Responsive breakpoints: grow the topbar as controls wrap, drop the sidebar at 900px. */ +@media (max-width: 1100px) { + :root { --topbar: 88px; } + .topbar { align-items: flex-start; flex-wrap: wrap; } + .toolbar-left { flex: 1 1 100%; overflow: visible; flex-wrap: wrap; } + .toolbar-right { flex: 1 1 100%; justify-content: flex-end; } + .search { flex: 1 1 220px; width: auto; min-width: 160px; max-width: none; } +} +@media (max-width: 900px) { + :root { --sidebar: 0px; } + .sidebar { display: none; } + .general-grid { grid-template-columns: 1fr; } +} +@media (max-width: 640px) { + :root { --topbar: 132px; } + .toolbar-right { width: 100%; justify-content: flex-start; flex-wrap: nowrap; gap: .35rem; } + .search { flex: 1 1 0; width: auto; min-width: 0; max-width: none; } + .preset-grid { grid-template-columns: 1fr 1fr; } +} + + +.preferences-grid { + display: grid; + grid-template-columns: repeat(2, minmax(220px, 1fr)); + gap: .75rem; +} +.form-field { display: grid; gap: .3rem; } +.form-field > span { color: var(--bs-secondary-color); font-size: .78rem; font-weight: 700; text-transform: uppercase; } +@media (max-width: 640px) { .preferences-grid { grid-template-columns: 1fr; } } + +/* Feature additions without changing the existing visual shell */ +.date-compact { + white-space: nowrap; +} +.btn-xs { + --bs-btn-padding-y: .18rem; + --bs-btn-padding-x: .42rem; + --bs-btn-font-size: .78rem; + --bs-btn-border-radius: .35rem; +} +.nav-btn { + border-radius: .45rem !important; + margin: 0 !important; + display: inline-flex; + align-items: center; + justify-content: center; + gap: .25rem; +} +.nav-btn + .nav-btn, +.torrent-action + .torrent-action { margin-left: .08rem !important; } +.path-list { + height: 360px; + overflow: auto; + border: 1px solid var(--bs-border-color); + border-radius: .6rem; + background: 
rgba(var(--bs-secondary-bg-rgb), .35); +} +.path-row { + display: flex; + align-items: center; + gap: .5rem; + padding: .42rem .6rem; + border-bottom: 1px solid var(--bs-border-color); + cursor: pointer; +} +.path-row:hover { background: var(--bs-primary-bg-subtle); color: var(--bs-primary-text-emphasis); } +.chips { display: flex; gap: .35rem; flex-wrap: wrap; } +.chip { + border: 1px solid var(--bs-border-color); + background: rgba(var(--bs-secondary-bg-rgb), .6); + color: var(--bs-body-color); + border-radius: 999px; + padding: .22rem .6rem; + font-size: .78rem; +} +.mobile-list { overflow: auto; padding: .55rem; background: var(--bs-body-bg); } +.mobile-card { + border: 1px solid var(--bs-border-color); + background: rgba(var(--bs-secondary-bg-rgb), .72); + border-radius: .75rem; + padding: .65rem; + margin-bottom: .55rem; +} +.mobile-card.selected { outline: 2px solid var(--bs-primary); } +.mobile-card .name { font-weight: 700; word-break: break-word; } +.mobile-actions { display: flex; gap: .35rem; margin-top: .45rem; } /* Mini CPU/RAM sparkline canvas in the status bar. */ +#systemChart { + width: 140px; + height: 24px; + border: 1px solid var(--bs-border-color); + border-radius: .35rem; + background: rgba(var(--bs-secondary-bg-rgb), .85); +} +.badge-degraded { background: #f59e0b !important; color: #111 !important; } +body.mobile-mode .table-wrap { display: none !important; } +body.mobile-mode #mobileList { display: block !important; } +body.mobile-mode .content { grid-template-rows: 1fr 210px; } +body.mobile-mode .torrent-table { display: none; } +@media (max-width: 640px) { + .nav-btn span { display: none; } +} + +/* Fixes: compact one-line progress cell and readable percent inside the bar. 
*/ +.torrent-table td:nth-child(5) { min-width: 92px; width: 110px; white-space: nowrap; } +.hidden-col{display:none!important} +.status-docs{margin-left:auto;color:inherit;text-decoration:none;font-weight:600;opacity:.9;white-space:nowrap} +.status-docs:hover{opacity:1;text-decoration:underline} +.column-check{padding:.35rem .5rem;border:1px solid var(--bs-border-color);border-radius:.5rem;background:var(--bs-body-bg)} +.label-filters .label-filter{font-size:.82rem;padding:.34rem .5rem;margin-bottom:.15rem} +.label-filters .label-filter i{opacity:.75;margin-right:.25rem} +.column-manager{display:grid;grid-template-columns:repeat(auto-fill,minmax(170px,1fr));gap:.55rem} +.column-card{display:flex;align-items:center;gap:.5rem;padding:.55rem .65rem;border:1px solid var(--bs-border-color);border-radius:.7rem;background:rgba(var(--bs-secondary-bg-rgb),.45);cursor:pointer;user-select:none;transition:background .15s,border-color .15s,transform .15s} +.column-card:hover{border-color:var(--bs-primary);background:var(--bs-primary-bg-subtle)} +.column-card.active{border-color:rgba(var(--bs-primary-rgb),.55);background:var(--bs-primary-bg-subtle)} +.column-card input{margin:0}.column-card span{display:flex;gap:.45rem;align-items:center;font-weight:600}.column-card i{opacity:.72} +.path-row::before{content:'\f07b';font-family:'Font Awesome 6 Free';font-weight:900;color:var(--bs-warning)} +body.mobile-mode #mobileList{min-height:0;height:100%;overflow:auto;display:block!important} +body.mobile-mode .mobile-card{display:block}.mobile-card .mobile-actions button{min-width:34px} +#toolSmart .form-label{font-size:.75rem;color:var(--bs-secondary-color);margin-bottom:.2rem} +.profile-form-grid{display:grid;grid-template-columns:1.1fr 2.1fr .55fr .75fr auto auto;gap:.5rem;align-items:center} +#toolSmart .btn{padding:.25rem .55rem;border-radius:.5rem;white-space:nowrap} +#toolSmart .row .d-flex{align-items:end;justify-content:flex-start} 
/* NOTE(review): this #trafficHistoryChart rule is superseded by the combined #trafficHistoryChart,#trafficSpeedChart rule below (same specificity, later in the cascade, border reset to 0); kept only as layered iteration history. */ +#trafficHistoryChart{width:100%;height:420px;border:1px solid var(--bs-border-color);border-radius:.75rem;background:var(--bs-body-bg)} +@media (max-width: 992px){.profile-form-grid{grid-template-columns:1fr}.profile-form-grid .btn{width:100%}} + +/* Requested fixes: stable charts, Smart Queue exceptions, label actions, mobile readability */ +.history-grid{display:grid;grid-template-columns:1fr;gap:1rem} +.history-card{border:1px solid var(--bs-border-color);border-radius:.8rem;background:rgba(var(--bs-secondary-bg-rgb),.35);padding:.75rem;min-width:0;overflow:hidden} +.history-title{font-weight:700;font-size:.9rem;margin-bottom:.45rem;color:var(--bs-body-color)} +#trafficHistoryChart,#trafficSpeedChart{display:block;width:100%;height:420px;max-width:100%;border:0;border-radius:.55rem;background:var(--bs-body-bg)} /* NOTE(review): no-op — repeats the 1fr value already set on .history-grid above. */ +@media (min-width: 992px){.history-grid{grid-template-columns:1fr}} +.smart-actions{display:flex;align-items:center;gap:.45rem;flex-wrap:wrap} +.empty-mini{padding:.7rem .8rem;border:1px dashed var(--bs-border-color);border-radius:.7rem;color:var(--bs-secondary-color);background:rgba(var(--bs-secondary-bg-rgb),.35)} +.label-manager-row{display:flex;align-items:center;justify-content:space-between;gap:.5rem;border:1px solid var(--bs-border-color);border-radius:.65rem;padding:.4rem .5rem;margin-bottom:.4rem;background:rgba(var(--bs-secondary-bg-rgb),.35)} +.tool-tab i{margin-right:.25rem;opacity:.82} +body.mobile-mode .content{display:grid!important;grid-template-rows:minmax(0,1fr)!important;min-height:0;overflow:hidden} +body.mobile-mode .details{display:none!important} +body.mobile-mode #mobileList{display:block!important;height:100%!important;min-height:220px;overflow:auto;position:relative;z-index:2;padding-bottom:1rem} +body.mobile-mode .main-grid{min-height:0;overflow:hidden} +@media 
(max-width:640px){.history-card{padding:.5rem}#trafficHistoryChart,#trafficSpeedChart{height:320px}.statusbar{font-size:.75rem;gap:.6rem}.mobile-list{padding:.45rem}.mobile-card{margin-bottom:.45rem}} + +/* Requested fixes: clean progress, mobile auto list, pagers, rTorrent config, peers refresh */ +.torrent-progress{height:16px;min-width:92px;position:relative;margin:0;overflow:hidden;background:rgba(var(--bs-secondary-bg-rgb),.8)!important} +.torrent-progress .progress-bar{min-width:0!important;position:relative;transition:width .25s ease,background-color .25s ease} +.torrent-progress>span{position:absolute;inset:0;display:flex;align-items:center;justify-content:center;font-size:10px;font-weight:700;line-height:1;color:var(--bs-body-color);text-shadow:none;white-space:nowrap;pointer-events:none} +.torrent-progress .progress-bar+span{color:var(--bs-body-color)} +body.mobile-mode #mobileList{display:block!important} +@media (max-width:700px){ + body:not(.desktop-mode) .table-wrap{display:none!important} + body:not(.desktop-mode) #mobileList{display:block!important;min-height:260px;height:100%;overflow:auto} + body:not(.desktop-mode) .content{display:grid!important;grid-template-rows:minmax(0,1fr)!important;min-height:0;overflow:hidden} + body:not(.desktop-mode) .details{display:none!important} +} +.pager-row{display:flex;align-items:center;justify-content:flex-end;gap:.5rem} +.peers-refresh{display:flex;align-items:center;gap:.5rem;justify-content:flex-end;padding:.35rem .75rem;border-bottom:1px solid var(--bs-border-color);background:rgba(var(--bs-secondary-bg-rgb),.35)} +.peers-refresh select{width:auto;min-width:96px} + +/* Mobile list: force visible on narrow screens even without manual toggle. 
*/ +@media (max-width: 900px) { + body:not(.modal-open) .table-wrap { display: none !important; } + body:not(.modal-open) #mobileList { display: block !important; height: 100% !important; min-height: 260px; overflow: auto; } + body:not(.modal-open) .content { display: grid !important; grid-template-rows: minmax(0,1fr) !important; min-height: 0; overflow: hidden; } + body:not(.modal-open) .details { display: none !important; } +} +.torrent-paused td{opacity:.82} +.torrent-paused .name{font-style:italic} + +/* Mobile blank-view fix: sidebar disappears at 900px, so the mobile list must also be forced from 900px down. */ +@media (max-width: 900px) { + .main-grid { + display: grid !important; + grid-template-columns: minmax(0, 1fr) !important; + min-height: 0 !important; + height: 100% !important; + overflow: hidden !important; + } + .sidebar { display: none !important; } + .content { + display: grid !important; + grid-template-rows: minmax(0, 1fr) !important; + min-height: 0 !important; + height: 100% !important; + overflow: hidden !important; + } + .table-wrap { display: none !important; } + #mobileList { + display: block !important; + height: 100% !important; + min-height: 0 !important; + overflow: auto !important; + position: relative !important; + z-index: 10 !important; + background: var(--bs-body-bg) !important; + padding: .55rem !important; + } + .details { display: none !important; } + .toolbar-right { width: 100% !important; min-width: 0 !important; flex-wrap: nowrap !important; gap: .35rem !important; } + .search { min-width: 0 !important; width: auto !important; flex: 1 1 0 !important; max-width: none !important; } + .mobile-speed-stats { display: inline-flex; } +} +@media (max-width: 640px) { + .toolbar-right { flex-wrap: nowrap !important; gap: .3rem !important; } + .search { min-width: 0 !important; width: auto !important; flex: 1 1 0 !important; max-width: none !important; } + .mobile-speed-stats { gap: .25rem; font-size: .66rem; } +} + 
+.files-toolbar{display:flex;gap:.75rem;align-items:center;justify-content:space-between;flex-wrap:wrap;margin-bottom:.5rem} +.file-priority-table .path{max-width:520px;white-space:nowrap;overflow:hidden;text-overflow:ellipsis} +.file-priority-table .file-priority{min-width:110px} +@media (max-width:900px){.files-toolbar{align-items:stretch}.files-toolbar .btn-group{display:grid;grid-template-columns:1fr;width:100%}.file-priority-table{font-size:.82rem}.file-priority-table .path{max-width:180px}} + /* Overlay bulk-action bar pinned above the list; horizontally scrollable on narrow screens. */ +.bulk-bar { + height: 38px; + display: flex; + align-items: center; + gap: .35rem; + flex-wrap: nowrap; + overflow-x: auto; + overflow-y: hidden; + padding: .35rem .55rem; + border-bottom: 1px solid var(--bs-border-color); + background: rgba(var(--bs-secondary-bg-rgb), .95); + z-index: 4; +} +.bulk-bar.d-none { display: none !important; } +.bulk-bar span { color: var(--bs-secondary-color); margin-right: .3rem; white-space: nowrap; } +.bulk-bar .btn { white-space: nowrap; flex: 0 0 auto; } +.move-options { + border: 1px solid var(--bs-border-color); + border-radius: .6rem; + padding: .75rem; + background: var(--bs-tertiary-bg); +} +/* Stable main layout: bulk actions overlay the list area, details stay pinned at the bottom. 
*/ +.content { + position: relative; + grid-template-rows: minmax(0, 1fr) 255px !important; +} +#bulkBar { grid-row: 1; grid-column: 1; align-self: start; } +#tableWrap, #mobileList { grid-row: 1; grid-column: 1; min-height: 0; } +.details { grid-row: 2; grid-column: 1; min-height: 0; } +.bulk-bar:not(.d-none) + .table-wrap { padding-top: 38px; } +@media (max-width: 900px) { + .bulk-bar { gap: .3rem; } +} + + +.label-mini{font-size:.72rem;padding:.12rem .38rem;margin-right:.15rem} +.label-chip.active{border-color:var(--bs-primary);background:var(--bs-primary-bg-subtle);color:var(--bs-primary-text-emphasis)} +.label-selected{border-color:var(--bs-primary);background:var(--bs-primary-bg-subtle);color:var(--bs-primary-text-emphasis)} + +.automation-form-grid { display:grid; grid-template-columns: repeat(4, minmax(160px, 1fr)); gap:.5rem; align-items:center; } +.automation-row { display:flex; justify-content:space-between; gap:.75rem; align-items:center; padding:.55rem .65rem; border:1px solid var(--bs-border-color); border-radius:.6rem; margin-bottom:.45rem; background:var(--bs-body-bg); } +@media (max-width: 900px){ .automation-form-grid { grid-template-columns: 1fr; } } +.disk-status{display:inline-flex;align-items:center;gap:.35rem;min-width:110px} +.disk-status canvas{border-radius:999px;background:rgba(var(--bs-secondary-bg-rgb),.65)} +.disk-status.disk-warn b{color:var(--bs-warning)!important} + +.system-chart{width:96px;height:24px;border-radius:.35rem;background:rgba(var(--bs-secondary-bg-rgb),.45)} +.torrent-progress.is-complete>span{color:#fff;text-shadow:0 1px 2px rgba(0,0,0,.35)} +.peer-progress{min-width:86px;width:96px} +.loading-center{justify-content:center;min-height:80px} +.loading-cell{padding:0!important} +.mobile-list .loading-center{min-height:160px} + +/* Torrent warning and mobile controls */ +.torrent-warning td { background: rgba(245, 158, 11, .075) !important; } +.torrent-warning:hover td { background: rgba(245, 158, 11, .11) !important; } 
+.torrent-warning.selected td { background: color-mix(in srgb, var(--bs-primary-bg-subtle) 82%, rgba(245, 158, 11, .16)) !important; } +.mobile-card.torrent-warning { background: rgba(245, 158, 11, .075); } +.mobile-card.torrent-warning.selected { background: color-mix(in srgb, var(--bs-primary-bg-subtle) 82%, rgba(245, 158, 11, .16)); } +.torrent-warning-icon { color: var(--bs-warning); margin-right: .2rem; } /* Sticky filter bar overlaying the top of the mobile list (shown via body.mobile-mode or the 900px media query below). */ +.mobile-filter-bar { + display: none; + grid-row: 1; + grid-column: 1; + align-self: start; + position: sticky; + top: 0; + z-index: 12; + padding: .45rem .55rem; + border-bottom: 1px solid var(--bs-border-color); + background: rgba(var(--bs-body-bg-rgb), .96); +} +.mobile-filter-actions, +.mobile-filter-select-row { + display: flex; + align-items: center; + gap: .35rem; +} +.mobile-filter-actions { margin-bottom: .4rem; } +.mobile-filter-actions span { color: var(--bs-secondary-color); font-size: .78rem; white-space: nowrap; } +.mobile-filter-select-row label { + color: var(--bs-secondary-color); + font-size: .78rem; + white-space: nowrap; +} +.mobile-filter-select-row select { + min-width: 0; + flex: 1 1 auto; +} +body.mobile-mode .mobile-filter-bar { display: block !important; } +body.mobile-mode #mobileList { padding-top: 5.2rem !important; } +@media (max-width: 900px) { + #mobileFilterBar { display: block !important; } + #mobileList { padding-top: 5.2rem !important; } + .topbar .badge { + width: .72rem; + height: .72rem; + min-width: .72rem; + padding: 0 !important; + border-radius: 999px; + overflow: hidden; + color: transparent !important; + text-indent: -999px; + box-shadow: 0 0 0 1px rgba(255,255,255,.22); + } + .topbar .badge .spinner-border { display: none; } +} + +/* rTorrent config */ +.rt-config-grid { + display: grid; + gap: .6rem; + grid-template-columns: repeat(auto-fit, minmax(280px, 1fr)); +} + +.rt-config-group { + grid-column: 1 / -1; + padding: .45rem .2rem .1rem; + border-bottom: 1px solid var(--bs-border-color); + color: 
var(--bs-primary-text-emphasis); + font-weight: 800; +} + +.rt-config-note { + margin-bottom: .75rem; +} + +.rt-config-toolbar { + display: flex; + align-items: center; + flex-wrap: wrap; + gap: .75rem; + margin-bottom: .75rem; +} + /* Single setting row: label/description on the left, value control on the right; .changed/.changed-live highlight unsaved or live-diverging values. */ +.rt-config-row { + display: grid; + grid-template-columns: 1fr minmax(120px, 190px); + align-items: center; + gap: .6rem; + padding: .6rem; + border: 1px solid var(--bs-border-color); + border-radius: .7rem; + background: rgba(var(--bs-secondary-bg-rgb), .35); +} + +.rt-config-row b { + font-size: .88rem; +} + +.rt-config-row small { + display: block; + overflow-wrap: anywhere; + color: var(--bs-secondary-color); + font-size: .72rem; +} + +.rt-config-row.disabled { + opacity: .58; +} + +.rt-config-row.changed, +.rt-config-row.changed-live { + border-color: var(--bs-danger); + box-shadow: 0 0 0 .12rem rgba(220, 53, 69, .2); +} + +.rt-config-value-note { + margin-top: .15rem; +} + +.rt-config-output { + font-family: ui-monospace, SFMono-Regular, Menlo, Consolas, monospace; + font-size: .82rem; +} + +/* Tracker management */ +.tracker-toolbar, +.tracker-actions { + display: flex; + align-items: center; + flex-wrap: wrap; + gap: .45rem; +} + +.tracker-toolbar { + justify-content: space-between; + margin-bottom: .55rem; +} + +.tracker-url { + min-width: 240px; + max-width: 520px; +} + +.tracker-message { + max-width: 360px; + white-space: normal; + word-break: break-word; +} + +.tracker-url-text { + word-break: break-all; +} + +/* Cleanup and app diagnostics */ +.tool-note { + color: var(--bs-secondary-color); + font-size: .82rem; +} + +.cleanup-grid, +.diag-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(190px, 1fr)); + gap: .6rem; +} + +.cleanup-card, +.diag-card { + padding: .65rem; + border: 1px solid var(--bs-border-color); + border-radius: .7rem; + background: rgba(var(--bs-secondary-bg-rgb), .35); +} + +.cleanup-card b, +.diag-card b { + display: block; + margin-bottom: .2rem; + color: 
var(--bs-secondary-color); + font-size: .78rem; +} + +.cleanup-card span, +.diag-card span { + font-weight: 700; +} + +.cleanup-card small { + display: block; + margin-top: .2rem; + overflow-wrap: anywhere; + color: var(--bs-secondary-color); +} + +.cleanup-actions { + display: flex; + flex-wrap: wrap; + gap: .5rem; +} + +.diag-error { + border-color: rgba(var(--bs-danger-rgb), .45); + background: rgba(var(--bs-danger-rgb), .08); +} + +.port-status { + display: inline-flex; + align-items: center; + gap: .3rem; + padding: .12rem .4rem; + border-radius: .45rem; +} + +.port-ok { + background: rgba(34, 197, 94, .14); + color: var(--bs-success); +} + +.port-bad { + background: rgba(239, 68, 68, .14); + color: var(--bs-danger); +} + +.port-secondary { + background: rgba(148, 163, 184, .12); + color: var(--bs-secondary-color); +} + +.limit-slider-panel { + padding: .65rem; + border: 1px solid var(--bs-border-color); + border-radius: .7rem; + background: rgba(var(--bs-secondary-bg-rgb), .32); +} + +.limit-slider-row + .limit-slider-row { + margin-top: .65rem; +} + +.limit-slider-row .form-label { + display: flex; + justify-content: space-between; + gap: .75rem; + margin-bottom: .25rem; +} + +@media (max-width: 640px) { + #mobileToggle { + display: none !important; + } + + .tracker-url { + min-width: 160px; + max-width: 230px; + } + + .tracker-message { + max-width: 220px; + } +} +.text-compact { + display: inline-block; + max-width: 32rem; + overflow: hidden; + text-overflow: ellipsis; + vertical-align: bottom; + white-space: nowrap; +} + +/* Operation status, mobile progress and separated preferences */ +.torrent-operating td { + background: rgba(13, 202, 240, .085) !important; +} + +.torrent-operating:hover td { + background: rgba(13, 202, 240, .13) !important; +} + +.torrent-operating.selected td { + background: color-mix(in srgb, var(--bs-primary-bg-subtle) 78%, rgba(13, 202, 240, .18)) !important; +} + +.mobile-card.torrent-operating { + background: rgba(13, 202, 240, 
.085); + border-color: rgba(13, 202, 240, .45); +} + +.mobile-card.torrent-operating.selected { + background: color-mix(in srgb, var(--bs-primary-bg-subtle) 78%, rgba(13, 202, 240, .18)); +} + +.operation-status-badge { + color: #062c33; +} + +.mobile-progress { + margin-top: .45rem; +} + +.mobile-progress .torrent-progress { + width: 100%; + min-width: 0; +} + +.preferences-sections { + display: grid; + gap: 1rem; +} + +.preference-section { + border-left: .25rem solid var(--bs-primary); +} diff --git a/pytorrent/templates/index.html b/pytorrent/templates/index.html new file mode 100644 index 0000000..743fd44 --- /dev/null +++ b/pytorrent/templates/index.html @@ -0,0 +1,157 @@ + + + + + + pyTorrent + + + + + + +
+
+
pyTorrent
+
+
Loading torrents...
+
Connecting to rTorrent and preparing the first data snapshot.
+
+
+
Working...
+
+
+
+
pyTorrent
+ + + + + + + + + + +
+
+ 0 B/s 0 B/s + busy + offline + + +
+
+ +
+ + +
+
0 selected
+ + + + + + + + +
NameStatusSizeProgressDLULSeedsPeersRatioPathLabelRatio group
Waiting for data.
+
+
+
+
+ +
+
Select a torrent.
+
+
+
+ +
+ CPU -%RAM -% + - + rTorrent -DL 0 B/sUL 0 B/s + + Total DL/UP 0B/0B Port - unknownShown 0Selected 0 Docs API +
+
+ +
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + +
+ + + + + diff --git a/pytorrent/utils.py b/pytorrent/utils.py new file mode 100644 index 0000000..7a2639a --- /dev/null +++ b/pytorrent/utils.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +import hashlib +from pathlib import Path + + +def human_size(num: int | float | None, suffix: str = "B") -> str: + value = float(num or 0) + for unit in ["", "K", "M", "G", "T", "P"]: + if abs(value) < 1024.0: + return f"{value:3.1f} {unit}{suffix}" if unit else f"{int(value)} {suffix}" + value /= 1024.0 + return f"{value:.1f} E{suffix}" + + +def human_rate(num: int | float | None) -> str: + return f"{human_size(num)}/s" + + +def file_md5(path: Path) -> str: + return hashlib.md5(path.read_bytes()).hexdigest()[:12] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..8810f4f --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +Flask>=3.0 +Flask-SocketIO>=5.3 +python-dotenv>=1.0 +geoip2>=4.8 +psutil>=5.9 +simple-websocket>=1.0 diff --git a/scripts/download_geoip.sh b/scripts/download_geoip.sh new file mode 100755 index 0000000..5c8cf51 --- /dev/null +++ b/scripts/download_geoip.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +set -euo pipefail + +DB_PATH="${1:-data/GeoLite2-City.mmdb}" +PRIMARY_URL="https://git.io/GeoLite2-City.mmdb" +FALLBACK_URL="https://github.com/P3TERX/GeoLite.mmdb/raw/download/GeoLite2-City.mmdb" +DB_DIR="$(dirname "$DB_PATH")" +TMP_FILE="${DB_PATH}.tmp" + +mkdir -p "$DB_DIR" +chmod 755 "$DB_DIR" + +if [ -s "$DB_PATH" ]; then + chmod 644 "$DB_PATH" + echo "GeoIP database already exists: $DB_PATH" + exit 0 +fi + +download() { + url="$1" + if command -v curl >/dev/null 2>&1; then + curl -fL --retry 3 --connect-timeout 15 --output "$TMP_FILE" "$url" + elif command -v wget >/dev/null 2>&1; then + wget -O "$TMP_FILE" "$url" + else + echo "Missing downloader: install curl or wget" >&2 + return 127 + fi +} + +rm -f "$TMP_FILE" +if ! 
download "$PRIMARY_URL"; then + rm -f "$TMP_FILE" + download "$FALLBACK_URL" +fi + +test -s "$TMP_FILE" +mv "$TMP_FILE" "$DB_PATH" +chmod 644 "$DB_PATH" + +echo "GeoIP database downloaded: $DB_PATH" diff --git a/systemd/pytorrent.service b/systemd/pytorrent.service new file mode 100644 index 0000000..54a3124 --- /dev/null +++ b/systemd/pytorrent.service @@ -0,0 +1,16 @@ +[Unit] +Description=pyTorrent web UI for rTorrent +After=network.target + +[Service] +Type=simple +WorkingDirectory=/opt/pytorrent +EnvironmentFile=/opt/pytorrent/.env +ExecStart=/opt/pytorrent/venv/bin/python /opt/pytorrent/app.py +Restart=always +RestartSec=3 +User=www-data +Group=www-data + +[Install] +WantedBy=multi-user.target