Merge pull request 'gunicorn' (#1) from gunicorn into master
Reviewed-on: #1
@@ -6,6 +6,8 @@ PYTORRENT_DEBUG=0
 PYTORRENT_POLL_INTERVAL=1.0
 PYTORRENT_WORKERS=16
 PYTORRENT_GEOIP_DB=data/GeoLite2-City.mmdb
+PYTORRENT_ALLOW_UNSAFE_WERKZEUG=0
+PYTORRENT_SCGI_RETRIES=8
 
 # Retention / Smart Queue
 PYTORRENT_TRAFFIC_HISTORY_RETENTION_DAYS=90

README.md
@@ -37,6 +37,23 @@ python app.py
 
 Default: `http://127.0.0.1:8090`.
 
+## Production deployment
+
+The preferred variant, without the development Werkzeug server:
+
+```bash
+. venv/bin/activate
+gunicorn --worker-class gthread --workers 1 --threads 32 --bind 0.0.0.0:8090 --access-logfile - --error-logfile - wsgi:app
+```
+
+Note: the application stays on `async_mode="threading"`, so WebSocket, `start_background_task`, the operation queue and the poller run in the same model as before.
+
+Alternatives analyzed but not adopted:
+
+- `eventlet` under Gunicorn: works with Flask-SocketIO, but requires green threads and monkey-patching; higher regression risk for the file/SCGI operations.
+- `gevent` under Gunicorn: a solid production option, but it needs extra dependencies and compatibility testing.
+- multiple Gunicorn workers: requires Redis/RabbitMQ/Kafka as a Socket.IO message queue, so it is not a 1:1 replacement (see the sketch right after this hunk).
+
 ## SCGI profile
 
 Example:
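The multi-worker caveat above is about Flask-SocketIO's shared event bus. A minimal sketch of what that rejected variant would need, not part of this PR; the Redis URL is a placeholder:

```python
# Sketch only: several Gunicorn workers would need a shared Socket.IO message queue
# so events emitted in one worker reach clients connected to another.
from flask_socketio import SocketIO

socketio = SocketIO(async_mode="threading",
                    message_queue="redis://127.0.0.1:6379/0")  # placeholder URL
```

With the single gthread worker configured here, no external queue is needed.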
app.py
@@ -1,7 +1,14 @@
 from pytorrent import create_app, socketio
-from pytorrent.config import HOST, PORT, DEBUG
+from pytorrent.config import ALLOW_UNSAFE_WERKZEUG, DEBUG, HOST, PORT
 
 app = create_app()
 
 if __name__ == "__main__":
-    socketio.run(app, host=HOST, port=PORT, debug=DEBUG, allow_unsafe_werkzeug=True)
+    # Note: This entrypoint is kept for local development; production should use gunicorn via wsgi:app.
+    socketio.run(
+        app,
+        host=HOST,
+        port=PORT,
+        debug=DEBUG,
+        allow_unsafe_werkzeug=ALLOW_UNSAFE_WERKZEUG,
+    )
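The Gunicorn command and the service units target `wsgi:app`, but the `wsgi` module itself does not appear in this diff. A minimal sketch of what it presumably contains, mirroring the factory use in `app.py` above:

```python
# wsgi.py - hypothetical sketch; the real file is not shown in this PR.
from pytorrent import create_app

# Gunicorn resolves "wsgi:app" to this module-level object.
app = create_app()
```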
@@ -8,20 +8,16 @@ Wants=network-online.target
 
 [Service]
 Type=simple
-#User=root
-#Group=root
 User=pytorrent
 Group=pytorrent
 WorkingDirectory=/opt/pyTorrent
 Environment="PYTHONUNBUFFERED=1"
 EnvironmentFile=/opt/pyTorrent/.env
-ExecStart=/opt/pyTorrent/venv/bin/python /opt/pyTorrent/app.py
+ExecStart=/opt/pyTorrent/venv/bin/gunicorn --worker-class gthread --workers 1 --threads 32 --bind ${PYTORRENT_HOST}:${PYTORRENT_PORT} --access-logfile - --error-logfile - wsgi:app
 Restart=always
 RestartSec=3
 KillSignal=SIGINT
 TimeoutStopSec=20
 
-# optional
 NoNewPrivileges=true
 PrivateTmp=true
@@ -7,6 +7,14 @@ from dotenv import load_dotenv
 BASE_DIR = Path(__file__).resolve().parent.parent
 load_dotenv(BASE_DIR / ".env")
 
+
+def _env_bool(name: str, default: bool = False) -> bool:
+    value = os.getenv(name)
+    if value is None:
+        return default
+    return value.strip().lower() in {"1", "true", "yes", "on"}
+
+
 SECRET_KEY = os.getenv("PYTORRENT_SECRET_KEY", "dev-change-me")
 DB_PATH = Path(os.getenv("PYTORRENT_DB_PATH", str(BASE_DIR / "data" / "pytorrent.sqlite3")))
 if not DB_PATH.is_absolute():
@@ -14,7 +22,9 @@ if not DB_PATH.is_absolute():
 
 HOST = os.getenv("PYTORRENT_HOST", "0.0.0.0")
 PORT = int(os.getenv("PYTORRENT_PORT", "8090"))
-DEBUG = os.getenv("PYTORRENT_DEBUG", "0") == "1"
+DEBUG = _env_bool("PYTORRENT_DEBUG", False)
+# Note: Keep Werkzeug opt-in only for explicit local/dev use, never by default in services.
+ALLOW_UNSAFE_WERKZEUG = _env_bool("PYTORRENT_ALLOW_UNSAFE_WERKZEUG", DEBUG)
 POLL_INTERVAL = float(os.getenv("PYTORRENT_POLL_INTERVAL", "1.0"))
 WORKERS = int(os.getenv("PYTORRENT_WORKERS", "16"))
 GEOIP_DB = Path(os.getenv("PYTORRENT_GEOIP_DB", str(BASE_DIR / "data" / "GeoLite2-City.mmdb")))
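Behaviour of the new `_env_bool` helper, shown as an illustrative session rather than repository code:

```python
import os

os.environ["PYTORRENT_DEBUG"] = "Yes"
assert _env_bool("PYTORRENT_DEBUG") is True       # "1", "true", "yes", "on", any case
os.environ["PYTORRENT_DEBUG"] = "off"
assert _env_bool("PYTORRENT_DEBUG") is False      # anything else is False
assert _env_bool("PYTORRENT_MISSING", True) is True  # unset falls back to the default
```

Since `ALLOW_UNSAFE_WERKZEUG` defaults to `DEBUG`, a plain dev setup keeps working while services must opt in explicitly.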
@@ -19,11 +19,13 @@ from ..db import default_user_id, connect, utcnow
 from ..services import preferences, rtorrent
 from ..services.torrent_cache import torrent_cache
 from ..services.torrent_summary import cached_summary
-from ..services.workers import enqueue, list_jobs, cancel_job, retry_job, clear_jobs
+from ..services.workers import enqueue, list_jobs, cancel_job, retry_job, clear_jobs, emergency_clear_jobs
 from ..services.geoip import lookup_ip
 
 bp = Blueprint("api", __name__, url_prefix="/api")
 
+MOVE_BULK_MAX_HASHES = 100
+
 
 def ok(payload=None):
     data = {"ok": True}
@@ -303,6 +305,52 @@ def enrich_bulk_payload(profile: dict, action_name: str, data: dict) -> dict:
     return payload
 
 
+def _chunk_hashes(hashes: list[str], size: int = MOVE_BULK_MAX_HASHES) -> list[list[str]]:
+    # Note: Splits very large torrent selections into predictable chunks so each queued job stays small and recoverable.
+    safe_size = max(1, int(size or MOVE_BULK_MAX_HASHES))
+    return [hashes[index:index + safe_size] for index in range(0, len(hashes), safe_size)]
+
+
+def enqueue_bulk_parts(profile: dict, action_name: str, data: dict) -> list[dict]:
+    # Note: One shared function splits large move/remove operations into small, ordered parts without touching the remaining actions.
+    base_payload = enrich_bulk_payload(profile, action_name, data)
+    hashes = base_payload.get("hashes") or []
+    chunks = _chunk_hashes(hashes)
+    if len(chunks) <= 1:
+        job_id = enqueue(action_name, profile["id"], base_payload)
+        return [{"job_id": job_id, "label": "bulk-1", "part": 1, "parts": 1, "hashes": hashes, "hash_count": len(hashes)}]
+
+    jobs = []
+    items_by_hash = {str(item.get("hash")): item for item in (base_payload.get("job_context") or {}).get("items") or []}
+    for index, chunk in enumerate(chunks, start=1):
+        payload = dict(base_payload)
+        payload["hashes"] = chunk
+        context = dict(base_payload.get("job_context") or {})
+        context.update({
+            "bulk": True,
+            "bulk_label": f"bulk-{index}",
+            "bulk_part": index,
+            "bulk_parts": len(chunks),
+            "hash_count": len(chunk),
+            "parent_hash_count": len(hashes),
+            "items": [items_by_hash[h] for h in chunk if h in items_by_hash],
+        })
+        payload["job_context"] = context
+        job_id = enqueue(action_name, profile["id"], payload)
+        jobs.append({"job_id": job_id, "label": context["bulk_label"], "part": index, "parts": len(chunks), "hashes": chunk, "hash_count": len(chunk)})
+    return jobs
+
+
+def enqueue_move_bulk_parts(profile: dict, data: dict) -> list[dict]:
+    # Note: Keeps the old public helper for move, but uses the same partitioning logic.
+    return enqueue_bulk_parts(profile, "move", data)
+
+
+def enqueue_remove_bulk_parts(profile: dict, data: dict) -> list[dict]:
+    # Note: Remove/rm gets the same splitting into parts as move, which reduces the load on rTorrent.
+    return enqueue_bulk_parts(profile, "remove", data)
+
+
 @bp.get("/profiles")
 def profiles_list():
     return ok({"profiles": preferences.list_profiles(), "active": preferences.active_profile()})
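A worked example of the chunking, assuming the default `MOVE_BULK_MAX_HASHES = 100`; the hashes are illustrative placeholders:

```python
# Illustrative hashes; real ones are 40-character torrent info hashes.
hashes = [f"HASH{i:04d}" for i in range(250)]
chunks = _chunk_hashes(hashes)
assert [len(c) for c in chunks] == [100, 100, 50]
# -> three jobs labelled bulk-1, bulk-2, bulk-3, each with its own job_context.
```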
@@ -437,6 +485,20 @@ def torrent_action(action_name: str):
     allowed = {"start", "pause", "stop", "resume", "recheck", "reannounce", "remove", "move", "set_label", "set_ratio_group"}
     if action_name not in allowed:
         return jsonify({"ok": False, "error": "Unknown action"}), 400
+    if action_name in {"move", "remove"}:
+        # Note: Large move/remove requests are split into ordered bulk parts; smaller requests keep the old single-job response shape.
+        jobs = enqueue_bulk_parts(profile, action_name, data)
+        first_job_id = jobs[0]["job_id"] if jobs else None
+        total_hashes = sum(int(job.get("hash_count") or 0) for job in jobs)
+        return ok({
+            "job_id": first_job_id,
+            "job_ids": [job["job_id"] for job in jobs],
+            "jobs": jobs,
+            "hash_count": total_hashes,
+            "bulk": total_hashes > 1,
+            "bulk_parts": len(jobs),
+            "chunk_size": MOVE_BULK_MAX_HASHES,
+        })
     payload = enrich_bulk_payload(profile, action_name, data)
     job_id = enqueue(action_name, profile["id"], payload)
     return ok({"job_id": job_id, "hash_count": len(payload.get("hashes") or []), "bulk": len(payload.get("hashes") or []) > 1})
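For the 250-hash example above, the handler would respond with something like the following; the job ids are made up:

```python
{
    "ok": True,
    "job_id": "j-1",                    # first part, kept for old clients
    "job_ids": ["j-1", "j-2", "j-3"],
    "jobs": [...],                      # per-part label/part/parts/hashes
    "hash_count": 250,
    "bulk": True,
    "bulk_parts": 3,
    "chunk_size": 100,
}
```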
@@ -566,8 +628,12 @@ def jobs_list():
 
 @bp.post("/jobs/clear")
 def jobs_clear():
+    if str(request.args.get("force") or "").lower() in {"1", "true", "yes"}:
+        # Emergency clearing: the endpoint keeps its standard behaviour, and force=1 triggers the rescue mode.
+        deleted = emergency_clear_jobs()
+        return ok({"deleted": deleted, "emergency": True})
     deleted = clear_jobs()
-    return ok({"deleted": deleted})
+    return ok({"deleted": deleted, "emergency": False})
 
 
 @bp.get("/cleanup/summary")
@@ -609,8 +675,8 @@ def cleanup_all():
 @bp.post("/jobs/<job_id>/cancel")
 def jobs_cancel(job_id: str):
     if not cancel_job(job_id):
-        return jsonify({"ok": False, "error": "Only pending or failed jobs can be cancelled"}), 400
-    return ok()
+        return jsonify({"ok": False, "error": "Only unfinished jobs can be cancelled"}), 400
+    return ok({"emergency": True})
 
 
 @bp.post("/jobs/<job_id>/retry")
@@ -55,7 +55,7 @@ def openapi():
             },
         },
         "/api/torrents": {"get": {"summary": "Get cached torrent snapshot", "responses": {"200": {"description": "Torrent list"}}}},
-        "/api/torrents/{action_name}": {"post": {"summary": "Queue torrent action", "description": "For move, path is the target directory; move_data=true physically moves data on the rTorrent host using a detached shell move with status polling, force-overwrites an existing destination, tolerates rTorrent execute timeouts around mkdir/start/polling, handles retries after a partially completed move, avoids SCGI timeout on long mv operations, and recheck defaults to move_data. Move and remove jobs are ordered per profile, so a later remove waits for earlier move/remove jobs to finish.", "parameters": [{"name": "action_name", "in": "path", "required": True, "schema": {"type": "string", "enum": ["start", "pause", "stop", "resume", "recheck", "remove", "move", "set_label", "set_ratio_group"]}}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"hashes": {"type": "array", "items": {"type": "string"}}, "path": {"type": "string", "description": "Target directory for move"}, "move_data": {"type": "boolean", "description": "Physically move data before setting torrent directory"}, "recheck": {"type": "boolean", "description": "Run hash check after physical move; defaults to move_data"}, "label": {"type": "string"}, "ratio_group": {"type": "string"}, "remove_data": {"type": "boolean"}}}}}}, "responses": {"200": {"description": "Job queued"}}}},
+        "/api/torrents/{action_name}": {"post": {"summary": "Queue torrent action", "description": "For move, path is the target directory; move_data=true physically moves data on the rTorrent host using a detached shell move with status polling, force-overwrites an existing destination, tolerates rTorrent execute timeouts around mkdir/start/polling, handles retries after a partially completed move, avoids SCGI timeout on long mv operations, and recheck defaults to move_data. Large move selections are split into ordered bulk parts of up to 100 hashes. Move and remove jobs are ordered per profile, so a later remove waits for earlier move/remove jobs to finish.", "parameters": [{"name": "action_name", "in": "path", "required": True, "schema": {"type": "string", "enum": ["start", "pause", "stop", "resume", "recheck", "remove", "move", "set_label", "set_ratio_group"]}}], "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"hashes": {"type": "array", "items": {"type": "string"}}, "path": {"type": "string", "description": "Target directory for move"}, "move_data": {"type": "boolean", "description": "Physically move data before setting torrent directory"}, "recheck": {"type": "boolean", "description": "Run hash check after physical move; defaults to move_data"}, "label": {"type": "string"}, "ratio_group": {"type": "string"}, "remove_data": {"type": "boolean"}}}}}}, "responses": {"200": {"description": "Job queued"}}}},
         "/api/torrents/add": {"post": {"summary": "Add magnet links or torrent files", "requestBody": {"content": {"multipart/form-data": {"schema": {"type": "object", "properties": {"uris": {"type": "string"}, "directory": {"type": "string"}, "label": {"type": "string"}, "start": {"type": "boolean"}, "files": {"type": "array", "items": {"type": "string", "format": "binary"}}}}}, "application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "Jobs queued"}}}},
         "/api/torrents/{torrent_hash}/files": {"get": {"summary": "Torrent files", "parameters": [{"name": "torrent_hash", "in": "path", "required": True, "schema": {"type": "string"}}], "responses": {"200": {"description": "Files"}}}},
         "/api/torrents/{torrent_hash}/peers": {"get": {"summary": "Torrent peers with GeoIP", "parameters": [{"name": "torrent_hash", "in": "path", "required": True, "schema": {"type": "string"}}], "responses": {"200": {"description": "Peers"}}}},
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import errno
 import os
 import posixpath
 import socket
@@ -53,6 +54,10 @@ class ScgiRtorrentClient:
         }
         header_blob = b"".join(k.encode() + b"\0" + v.encode() + b"\0" for k, v in headers.items())
         payload = str(len(header_blob)).encode("ascii") + b":" + header_blob + b"," + body
+        attempts = _scgi_retry_attempts()
+        last_exc = None
+        for attempt in range(1, attempts + 1):
+            try:
         with socket.create_connection((self.host, self.port), timeout=self.timeout) as sock:
             sock.settimeout(self.timeout)
             sock.sendall(payload)
@@ -71,6 +76,35 @@
                     response = response.split(b"\n\n", 1)[1]
                     result, _ = loads(response)
                     return result[0] if len(result) == 1 else result
+            except Exception as exc:
+                last_exc = exc
+                if attempt >= attempts or not _is_transient_scgi_error(exc):
+                    raise
+                time.sleep(_scgi_retry_delay(attempt))
+        raise last_exc or ConnectionError("rTorrent SCGI call failed")
+
+
+def _scgi_retry_attempts() -> int:
+    # Note: A short retry/backoff protects bulk operations against transient Errno 111 under high rTorrent load.
+    try:
+        return max(1, min(10, int(os.environ.get("PYTORRENT_SCGI_RETRIES", "5"))))
+    except Exception:
+        return 5
+
+
+def _scgi_retry_delay(attempt: int) -> float:
+    return min(5.0, 0.35 * (2 ** max(0, attempt - 1)))
+
+
+def _is_transient_scgi_error(exc: Exception) -> bool:
+    # Note: The retry covers typical transient SCGI/socket errors but does not hide substantive XML-RPC errors.
+    if isinstance(exc, (ConnectionRefusedError, ConnectionResetError, TimeoutError, socket.timeout)):
+        return True
+    err_no = getattr(exc, "errno", None)
+    if err_no in {errno.ECONNREFUSED, errno.ECONNRESET, errno.ETIMEDOUT, errno.EHOSTUNREACH, errno.ENETUNREACH}:
+        return True
+    msg = str(exc).lower()
+    return any(text in msg for text in ("connection refused", "connection reset", "timed out", "timeout", "empty response", "pipe creation failed", "resource temporarily unavailable", "try again", "temporarily unavailable"))
+
+
 def client_for(profile: dict) -> ScgiRtorrentClient:
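The delay helper yields a capped exponential schedule; evaluating it for the default five attempts, purely as an illustration:

```python
# Illustrative evaluation of _scgi_retry_delay for attempts 1..5:
[min(5.0, 0.35 * (2 ** (a - 1))) for a in range(1, 6)]
# -> [0.35, 0.7, 1.4, 2.8, 5.0]   (the uncapped 5th step would be 5.6s)
```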
@@ -78,32 +112,78 @@ def client_for(profile: dict) -> ScgiRtorrentClient:
 
 
 _UNSUPPORTED_EXEC_METHODS: set[str] = set()
+_EXEC_TARGET_STYLE: dict[str, int] = {}
+
+
+def _rt_execute_preview(method_name: str, call_args: tuple) -> str:
+    # Note: The shortened RPC description keeps long scripts out of the error message but leaves the method and first arguments for diagnostics.
+    preview = ", ".join(repr(x) for x in call_args[:3])
+    if len(call_args) > 3:
+        preview += ", ..."
+    return f"{method_name}({preview})"
+
+
+def _rt_execute_target_variants(method: str, args: tuple) -> list[tuple]:
+    # Note: Depending on the version, rTorrent XML-RPC either requires an empty target or rejects it; remember the working variant per method.
+    variants = [("", *args), args]
+    preferred = _EXEC_TARGET_STYLE.get(method)
+    if preferred is not None and 0 <= preferred < len(variants):
+        return [variants[preferred]] + [v for i, v in enumerate(variants) if i != preferred]
+    return variants
+
+
+def _is_rt_method_missing(exc: Exception) -> bool:
+    msg = str(exc).lower()
+    return "not defined" in msg or "no such method" in msg or "unknown method" in msg
+
+
+def _rt_execute_methods(method: str) -> list[str]:
+    # Note: execute2.* is tried only once the basic execute.* does not exist, to avoid generating false retry errors.
+    methods = [method]
+    if method.startswith("execute."):
+        fallback = method.replace("execute.", "execute2.", 1)
+        if fallback not in _UNSUPPORTED_EXEC_METHODS:
+            methods.append(fallback)
+    return methods
 
 
 def _rt_execute(c: ScgiRtorrentClient, method: str, *args):
     """Run rTorrent execute.* as the rTorrent user across XML-RPC variants."""
-    method_names = [method]
-    if method.startswith("execute."):
-        execute2 = method.replace("execute.", "execute2.", 1)
-        if execute2 not in _UNSUPPORTED_EXEC_METHODS:
-            method_names.append(execute2)
-    errors = []
-    for method_name in method_names:
-        for call_args in (("", *args), args):
-            try:
-                return c.call(method_name, *call_args)
-            except Exception as exc:
-                message = str(exc)
-                if "not defined" in message.lower():
-                    _UNSUPPORTED_EXEC_METHODS.add(method_name)
-                errors.append(f"{method_name}({preview}): {exc}")
+    errors: list[str] = []
+    attempts = _scgi_retry_attempts()
+    for attempt in range(1, attempts + 1):
+        errors.clear()
+        transient_seen = False
+        primary_missing = False
+        for method_index, method_name in enumerate(_rt_execute_methods(method)):
+            if method_name in _UNSUPPORTED_EXEC_METHODS:
+                continue
+            if method_index > 0 and not primary_missing:
+                continue
+            for call_args in _rt_execute_target_variants(method_name, args):
+                try:
+                    result = c.call(method_name, *call_args)
+                    if method_name == method:
+                        _EXEC_TARGET_STYLE[method_name] = 0 if call_args and call_args[0] == "" else 1
+                    return result
+                except Exception as exc:
+                    if _is_rt_method_missing(exc):
+                        _UNSUPPORTED_EXEC_METHODS.add(method_name)
+                        if method_name == method:
+                            primary_missing = True
+                        errors.append(f"{method_name}: method not defined")
+                        break
+                    transient_seen = transient_seen or _is_transient_scgi_error(exc)
+                    errors.append(f"{_rt_execute_preview(method_name, call_args)}: {exc}")
+        if transient_seen and attempt < attempts:
+            time.sleep(_scgi_retry_delay(attempt))
+            continue
+        break
+    raise RuntimeError("rTorrent execute failed: " + "; ".join(errors))
 
 
 def _is_rt_timeout_error(exc: Exception) -> bool:
-    return isinstance(exc, (TimeoutError, socket.timeout)) or "timed out" in str(exc).lower()
+    msg = str(exc).lower()
+    return isinstance(exc, (TimeoutError, socket.timeout)) or "timed out" in msg or "timeout" in msg
 
 
 def _rt_execute_allow_timeout(c: ScgiRtorrentClient, method: str, *args):
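An illustrative call flow for the rewritten `_rt_execute`; the method and arguments are examples mirroring the poll usage below, and `c` is assumed to be a connected `ScgiRtorrentClient`:

```python
# Hypothetical probe call, not from the repository:
output = _rt_execute(c, "execute.capture", "sh", "-c", "echo ok", "pytorrent-probe")
# Attempt order: execute.capture("", ...) first, then execute.capture(...);
# the variant that succeeds is cached in _EXEC_TARGET_STYLE for later calls,
# and execute2.capture is tried only after execute.capture reports "not defined".
```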
@@ -159,7 +239,8 @@ def _run_remote_move(c: ScgiRtorrentClient, src: str, dst: str, poll_interval: f
         try:
             output = str(_rt_execute(c, "execute.capture", "sh", "-c", poll_script, "pytorrent-move-poll", status_path) or "").strip()
         except Exception as exc:
-            if _is_rt_timeout_error(exc):
+            # Note: During a bulk move rTorrent can momentarily fail to create the pipe for execute.capture; the polling waits and retries.
+            if _is_rt_timeout_error(exc) or _is_transient_scgi_error(exc):
                 continue
             raise
         if not output:
@@ -207,6 +288,47 @@ def _safe_rm_rf_path(path: str) -> str:
     return path
 
 
+def _run_remote_rm(c: ScgiRtorrentClient, path: str, poll_interval: float = 2.0) -> None:
+    # Note: rm -rf runs in the background on the rTorrent side, so a long delete does not hold a single SCGI connection.
+    token = uuid.uuid4().hex
+    status_path = f"/tmp/pytorrent-rm-{token}.status"
+    script = (
+        'target=$1; status=$2; tmp=${status}.tmp; '
+        'rm -f "$status" "$tmp"; '
+        '( rc=0; '
+        'if [ -z "$target" ] || [ "$target" = "/" ] || [ "$target" = "." ]; then echo "unsafe remove target: $target" >&2; rc=5; '
+        'else rm -rf -- "$target" || rc=$?; fi; '
+        'if [ $rc -eq 0 ]; then printf "OK\n" > "$status"; else printf "ERR %s\n" "$rc" > "$status"; fi; '
+        'if [ -s "$tmp" ]; then cat "$tmp" >> "$status"; fi; '
+        'rm -f "$tmp" ) > "$tmp" 2>&1 &'
+    )
+    poll_script = 'status=$1; [ -f "$status" ] && cat "$status" || true'
+    cleanup_script = 'rm -f "$1"'
+    _rt_execute_allow_timeout(c, "execute.throw", "sh", "-c", script, "pytorrent-rm-start", path, status_path)
+    while True:
+        time.sleep(max(0.25, poll_interval))
+        try:
+            output = str(_rt_execute(c, "execute.capture", "sh", "-c", poll_script, "pytorrent-rm-poll", status_path) or "").strip()
+        except Exception as exc:
+            # Note: Remove uses the same safe polling as move, so a momentary missing pipe does not take down the whole queue.
+            if _is_rt_timeout_error(exc) or _is_transient_scgi_error(exc):
+                continue
+            raise
+        if not output:
+            continue
+        try:
+            _rt_execute(c, "execute.throw", "sh", "-c", cleanup_script, "pytorrent-rm-clean", status_path)
+        except Exception:
+            pass
+        first_line = output.splitlines()[0].strip()
+        if first_line == "OK":
+            return
+        if first_line.startswith("ERR"):
+            details = "\n".join(output.splitlines()[1:]).strip()
+            raise RuntimeError(details or first_line)
+        raise RuntimeError(output)
+
+
 def _remove_torrent_data(c: ScgiRtorrentClient, torrent_hash: str) -> dict:
     data_path = _safe_rm_rf_path(_torrent_data_path(c, torrent_hash))
     try:
@@ -217,7 +339,7 @@ def _remove_torrent_data(c: ScgiRtorrentClient, torrent_hash: str) -> dict:
         c.call("d.close", torrent_hash)
     except Exception:
         pass
-    _rt_execute(c, "execute.throw", "rm", "-rf", data_path)
+    _run_remote_rm(c, data_path)
     return {"hash": torrent_hash, "removed_path": data_path}
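The detached remove shares the move helpers' status-file protocol: no file means still running, `OK` means success, `ERR <rc>` plus captured stderr means failure. An illustrative parse of a failed status file, matching the loop above:

```python
# Hypothetical status-file content after a failed background rm:
output = "ERR 1\nrm: cannot remove '/data/x': Permission denied"
first_line = output.splitlines()[0].strip()                # -> "ERR 1"
details = "\n".join(output.splitlines()[1:]).strip()       # captured stderr
# _run_remote_rm raises RuntimeError(details or first_line) for this case.
```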
@@ -152,6 +152,10 @@ def _run(job_id: str):
         _emit("operation_started", {"job_id": job_id, "action": job["action"], "profile_id": profile["id"], "hashes": payload.get("hashes") or [], "hash_count": len(payload.get("hashes") or []), "bulk": len(payload.get("hashes") or []) > 1})
         _emit("job_update", {"id": job_id, "status": "running", "attempts": attempts})
         result = _execute(profile, job["action"], payload)
+        fresh = _job_row(job_id)
+        # Emergency cancel: if the user cancels a job while it is running, the result must not overwrite the cancelled status.
+        if fresh and fresh["status"] == "cancelled":
+            return
         _set_job(job_id, "done", result=result, finished=True)
         _emit("operation_finished", {"job_id": job_id, "action": job["action"], "profile_id": profile["id"], "hashes": payload.get("hashes") or [], "hash_count": len(payload.get("hashes") or []), "bulk": len(payload.get("hashes") or []) > 1, "result": result})
         _emit("job_update", {"id": job_id, "status": "done", "result": result})
@@ -159,6 +163,9 @@ def _run(job_id: str):
         fresh = _job_row(job_id) or {}
         attempts = int(fresh.get("attempts") or 1)
         max_attempts = int(fresh.get("max_attempts") or 2)
+        # Emergency cancel: an exception from a cancelled job must not push it back to retry or failed.
+        if fresh and fresh.get("status") == "cancelled":
+            return
         status = "pending" if attempts < max_attempts else "failed"
         _set_job(job_id, status, str(exc), finished=(status == "failed"))
         _emit("operation_failed", {"job_id": job_id, "action": job.get("action"), "profile_id": job.get("profile_id"), "hashes": payload.get("hashes") or [], "error": str(exc)})
@@ -182,6 +189,9 @@ def _job_summary(row: dict, payload: dict, result: dict) -> str:
     ctx = payload.get("job_context") or {}
     count = int(ctx.get("hash_count") or len(payload.get("hashes") or []) or result.get("count") or 0)
     parts = []
+    if ctx.get("bulk_label"):
+        # Note: Shows which generated bulk part is being displayed in the job queue.
+        parts.append(f"{ctx.get('bulk_label')} of {ctx.get('bulk_parts')}")
     if count:
         parts.append(("bulk " if count > 1 else "single ") + f"{count} torrent(s)")
     if ctx.get("target_path"):
@@ -226,8 +236,9 @@ def list_jobs(limit: int = 200, offset: int = 0):
 
 def cancel_job(job_id: str) -> bool:
     row = _job_row(job_id)
-    if not row or row["status"] not in {"pending", "failed"}:
+    if not row or row["status"] not in {"pending", "running"}:
         return False
+    # Note: Emergency cancel only makes sense for unfinished jobs; failed/done entries stay for retry or log cleanup.
     _set_job(job_id, "cancelled", finished=True)
     _emit("job_update", {"id": job_id, "status": "cancelled"})
     return True
@@ -239,6 +250,17 @@ def clear_jobs() -> int:
     return int(cur.rowcount or 0)
 
 
+def emergency_clear_jobs() -> int:
+    # Emergency clearing: first closes active jobs as cancelled, then clears the whole job log list.
+    now = utcnow()
+    with connect() as conn:
+        conn.execute("UPDATE jobs SET status='cancelled', error='Emergency cancelled by user', finished_at=COALESCE(finished_at, ?), updated_at=? WHERE status IN ('pending', 'running')", (now, now))
+        cur = conn.execute("DELETE FROM jobs")
+        deleted = int(cur.rowcount or 0)
+    _emit("job_update", {"status": "cleared", "emergency": True})
+    return deleted
+
+
 def retry_job(job_id: str) -> bool:
     row = _job_row(job_id)
     if not row or row["status"] not in {"failed", "cancelled"}:
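A hypothetical call of the rescue mode exposed above, assuming the default bind address from the README:

```python
import json
import urllib.request

req = urllib.request.Request(
    "http://127.0.0.1:8090/api/jobs/clear?force=1",
    data=b"{}",
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))  # e.g. {'ok': True, 'deleted': 12, 'emergency': True}
```

Without `force=1` the same endpoint keeps its old behaviour and only clears finished logs.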
@@ -69,7 +69,7 @@
 function progressBar(value, extraClass=''){ const pct=Math.max(0,Math.min(100,Number(value||0))); const hue=Math.round((pct/100)*120); const light=30+Math.round((pct/100)*5); const bg=pct<=0?'transparent':pct>=100?'var(--torrent-progress-complete)':`hsl(${hue} 52% ${light}%)`; const done=pct>=100?' is-complete':''; const cls=extraClass?` ${extraClass}`:''; return `<div class="progress torrent-progress${done}${cls}" title="${esc(pct)}%"><div class="progress-bar" style="width:${pct}%;background:${bg}"></div><span>${esc(pct)}%</span></div>`; }
 function progress(t){ return progressBar(t.progress); }
 // Note: Displays status filter summaries calculated and cached by the backend API.
-const FILTER_COUNT_IDS = {all:'countAll', downloading:'countDownloading', seeding:'countSeeding', paused:'countPaused', checking:'countChecking', error:'countError', stopped:'countStopped'};
+const FILTER_COUNT_IDS = {all:'countAll', downloading:'countDownloading', seeding:'countSeeding', paused:'countPaused', checking:'countChecking', error:'countError', stopped:'countStopped', moving:'countMoving'};
 function formatFilterBytes(value){ return fmtBytes(value).replace(/\.0 (?=GiB|TiB)/, ' '); }
 function filterMetaLine(bucket){
   if(!bucket || !Number(bucket.count||0)) return '';
@@ -137,16 +137,25 @@
   }
   applyFilterTooltip(button, tooltip, ariaLabel);
 }
+function movingOperationRows(){
+  // Note: The Moving filter is based only on move operations in progress, not on pending jobs.
+  return [...torrents.values()].filter(t=>{
+    const op=activeOperationFor(t);
+    return op?.action==='move' && op?.state==='running';
+  });
+}
+function movingFilterCount(){ return movingOperationRows().length; }
 function setFilterSummary(type){
   const el=$(FILTER_COUNT_IDS[type]);
   if(!el) return;
-  const bucket=torrentSummary?.filters?.[type] || {count:0};
-  const meta=filterMetaLine(bucket, type);
-  const tooltip=filterTooltipLine(bucket, type);
+  const bucket=type==='moving' ? {count:movingFilterCount()} : (torrentSummary?.filters?.[type] || {count:0});
+  const meta=type==='moving' ? '' : filterMetaLine(bucket, type);
+  const tooltip=type==='moving' && bucket.count ? 'Active moving operations' : filterTooltipLine(bucket, type);
   el.innerHTML=`<span class="filter-count">${esc(bucket.count||0)}</span>${meta?`<span class="filter-meta">${esc(meta)}</span>`:''}`;
   const button=el.closest('.filter');
   if(button){
     const ariaLabel = tooltip ? `${button.dataset.filter || type}: ${tooltip.replace(/\n/g, ', ')}` : '';
+    button.classList.toggle('d-none', type==='moving' && !Number(bucket.count||0));
     setStableFilterTooltip(button, tooltip, ariaLabel);
   }
 }
@@ -155,12 +164,19 @@
 function rowHasLabel(t,label){ return labelNames(t.label).includes(label); }
 function torrentHasError(t){ return !!torrentWarning(t); }
 function isChecking(t){ return t?.status==='Checking' || Number(t?.hashing||0)>0; }
-function rowVisible(t){ const q=($('searchBox')?.value||'').toLowerCase(); if(q && ![t.name,t.path,t.label,t.hash,t.ratio_group].join(' ').toLowerCase().includes(q)) return false; if(activeFilter==='downloading') return !isChecking(t) && !t.complete && t.state && !t.paused; if(activeFilter==='seeding') return !isChecking(t) && t.complete && t.state && !t.paused; if(activeFilter==='paused') return !!t.paused || t.status==='Paused'; if(activeFilter==='checking') return isChecking(t); if(activeFilter==='error') return torrentHasError(t); if(activeFilter==='stopped') return !t.state && !isChecking(t); if(activeFilter.startsWith('label:')) return rowHasLabel(t,activeFilter.slice(6)); return true; }
+function rowVisible(t){ const q=($('searchBox')?.value||'').toLowerCase(); if(q && ![t.name,t.path,t.label,t.hash,t.ratio_group].join(' ').toLowerCase().includes(q)) return false; if(activeFilter==='downloading') return !isChecking(t) && !t.complete && t.state && !t.paused; if(activeFilter==='seeding') return !isChecking(t) && t.complete && t.state && !t.paused; if(activeFilter==='paused') return !!t.paused || t.status==='Paused'; if(activeFilter==='checking') return isChecking(t); if(activeFilter==='error') return torrentHasError(t); if(activeFilter==='stopped') return !t.state && !isChecking(t); if(activeFilter==='moving') { const op=activeOperationFor(t); return op?.action==='move' && op?.state==='running'; } if(activeFilter.startsWith('label:')) return rowHasLabel(t,activeFilter.slice(6)); return true; }
 function compareRows(a,b){ const k=sortState.key; let av=a[k], bv=b[k]; if(typeof av==='string'||typeof bv==='string') return String(av||'').localeCompare(String(bv||''))*sortState.dir; return ((Number(av||0)>Number(bv||0))?1:(Number(av||0)<Number(bv||0)?-1:0))*sortState.dir; }
 function sortIcon(key){ if(sortState.key!==key) return ''; return sortState.dir>0?" <i class='fa-solid fa-caret-up'></i>":" <i class='fa-solid fa-caret-down'></i>"; }
 function updateSortHeaders(){ document.querySelectorAll('.torrent-table thead th[data-sort]').forEach(th=>{ const base=th.dataset.baseText||th.textContent.trim(); th.dataset.baseText=base; th.innerHTML=`${esc(base)}${sortIcon(th.dataset.sort)}`; th.classList.toggle('sorted',sortState.key===th.dataset.sort); }); }
 // Note: Refreshes sidebar counters from the cached API summary, not from browser-side aggregation.
+function syncFilterButtons(){
+  // Note: The active class is re-synchronized after the automatic fallback from Moving to All.
+  document.querySelectorAll('.filter').forEach(x=>x.classList.toggle('active', x.dataset.filter===activeFilter));
+}
 function renderCounts(){
+  // Note: When the last move operation finishes, the hidden filter must not stay active over an empty list.
+  if(activeFilter==='moving' && !movingFilterCount()) activeFilter='all';
+  syncFilterButtons();
   Object.keys(FILTER_COUNT_IDS).forEach(setFilterSummary);
   $('statSelected').textContent=selected.size;
 }
@@ -179,6 +195,12 @@
   [...new Set(hashes||[])].filter(Boolean).forEach(hash=>activeOperations.set(hash,{action,jobId,state,label,updatedAt:Date.now()}));
   scheduleRender(true);
 }
+function markQueuedJobs(response, fallbackHashes, action){
+  // Note: Supports API responses that split one large user action into multiple queued bulk parts.
+  const jobs=Array.isArray(response?.jobs)?response.jobs:[];
+  if(jobs.length){ jobs.forEach(job=>markTorrentOperation(job.hashes||[],action,job.job_id,'queued')); return; }
+  markTorrentOperation(fallbackHashes,action,response?.job_id,'queued');
+}
 function clearJobOperation(jobId, hashes=[]){
   if(jobId){ [...activeOperations].forEach(([hash,op])=>{ if(op.jobId===jobId) activeOperations.delete(hash); }); }
   (hashes||[]).forEach(hash=>activeOperations.delete(hash));
@@ -200,7 +222,7 @@
 function torrentWarning(t){ const msg=String(t.message||'').trim(); if(!msg) return null; const l=msg.toLowerCase(); const patterns=['error','failed','failure','timeout','timed out','tracker','could not','cannot','refused','unreachable','denied']; return patterns.some(p=>l.includes(p)) ? msg : null; }
 function torrentNameIcon(t){ const m=statusMeta(t); return `<i class="fa-solid ${m.icon} ${m.color}"></i>`; }
 function renderRow(t){ const labels=labelNames(t.label).map(l=>`<span class="chip label-mini"><i class="fa-solid fa-tag"></i> ${esc(l)}</span>`).join(' '); const warn=torrentWarning(t); const op=activeOperationFor(t); const classes=[selected.has(t.hash)?'selected':'', t.paused?'torrent-paused':'', op?'torrent-operating':'', warn?'torrent-warning':''].filter(Boolean).join(' '); const title=[t.name,warn,op?op.label:''].filter(Boolean).join('\n'); return `<tr data-hash="${esc(t.hash)}" class="${classes}"><td data-col="select" class="sel"><input class="row-check" type="checkbox" ${selected.has(t.hash)?'checked':''}></td><td data-col="name" class="name" title="${esc(title)}">${warn?'<i class="fa-solid fa-triangle-exclamation torrent-warning-icon"></i> ':''}${torrentNameIcon(t)} ${esc(t.name)}</td><td data-col="status">${statusBadge(t)}</td><td data-col="size">${esc(t.size_h)}</td><td data-col="progress">${progress(t)}</td><td data-col="down_rate">${esc(t.down_rate_h)}</td><td data-col="up_rate">${esc(t.up_rate_h)}</td><td data-col="seeds">${esc(t.seeds)}</td><td data-col="peers">${esc(t.peers)}</td><td data-col="ratio">${esc(t.ratio)}</td><td data-col="path" class="path" title="${esc(t.path)}">${esc(t.path)}</td><td data-col="label">${labels||'<span class="text-muted">-</span>'}</td><td data-col="ratio_group">${esc(t.ratio_group||'')}</td></tr>`; }
-function mobileFilterDefs(){ const arr=[...torrents.values()]; const f=torrentSummary?.filters||{}; const defs=[['all','All',f.all?.count??0],['downloading','Downloading',f.downloading?.count??0],['seeding','Seeding',f.seeding?.count??0],['paused','Paused',f.paused?.count??0],['checking','Checking',f.checking?.count??0],['error','With error',f.error?.count??0],['stopped','Stopped',f.stopped?.count??0]]; const counts=new Map(); arr.forEach(t=>labelNames(t.label).forEach(l=>counts.set(l,(counts.get(l)||0)+1))); [...counts.keys()].sort((a,b)=>a.localeCompare(b)).forEach(l=>defs.push([`label:${l}`,l,counts.get(l),'label'])); return defs; }
+function mobileFilterDefs(){ const arr=[...torrents.values()]; const f=torrentSummary?.filters||{}; const defs=[['all','All',f.all?.count??0],['downloading','Downloading',f.downloading?.count??0],['seeding','Seeding',f.seeding?.count??0],['paused','Paused',f.paused?.count??0],['checking','Checking',f.checking?.count??0],['error','With error',f.error?.count??0],['stopped','Stopped',f.stopped?.count??0]]; const movingCount=movingFilterCount(); if(movingCount) defs.push(['moving','Moving',movingCount]); const counts=new Map(); arr.forEach(t=>labelNames(t.label).forEach(l=>counts.set(l,(counts.get(l)||0)+1))); [...counts.keys()].sort((a,b)=>a.localeCompare(b)).forEach(l=>defs.push([`label:${l}`,l,counts.get(l),'label'])); return defs; }
 function renderMobileFilters(){ const bar=$('mobileFilterBar'); if(!bar) return; const allVisible=visibleRows.length>0 && visibleRows.every(t=>selected.has(t.hash)); const someVisible=visibleRows.some(t=>selected.has(t.hash)); const opts=mobileFilterDefs().map(([key,label,count,type])=>`<option value="${esc(key)}" ${activeFilter===key?'selected':''}>${type==='label'?'Label: ':''}${esc(label)} (${count})</option>`).join(''); bar.innerHTML=`<div class="mobile-filter-actions"><button id="mobileSelectAll" class="btn btn-xs ${allVisible?'btn-primary':'btn-outline-primary'}" type="button"><i class="fa-solid fa-check-double"></i> ${allVisible?'Unselect all':'Select all'}</button><button id="mobileClearSelection" class="btn btn-xs btn-outline-secondary" type="button" ${someVisible?'':'disabled'}><i class="fa-solid fa-xmark"></i> Clear</button><span>${selected.size} selected</span></div><div class="mobile-filter-select-row"><label for="mobileFilterSelect"><i class="fa-solid fa-filter"></i> Filter</label><select id="mobileFilterSelect" class="form-select form-select-sm">${opts}</select></div>`; }
 function renderMobile(){ const list=$('mobileList'); if(!list) return; const src=visibleRows.length?visibleRows:[...torrents.values()].filter(rowVisible).sort(compareRows); const rows=src.slice(0,250); renderMobileFilters(); list.innerHTML=rows.map(t=>{ const warn=torrentWarning(t); const op=activeOperationFor(t); const classes=[selected.has(t.hash)?'selected':'', op?'torrent-operating':'', warn?'torrent-warning':''].filter(Boolean).join(' '); return `<div class="mobile-card ${classes}" data-hash="${esc(t.hash)}" title="${esc(warn||op?.label||'')}"><div class="name">${warn?'<i class="fa-solid fa-triangle-exclamation torrent-warning-icon"></i> ':''}${torrentNameIcon(t)} ${esc(t.name)}</div><div class="small text-muted">${statusBadge(t)} · ${esc(t.progress)}% · Ratio ${esc(t.ratio)}</div><div class="small">DL ${esc(t.down_rate_h)} / UL ${esc(t.up_rate_h)}</div><div class="small text-truncate">${esc(t.path)}</div><div class="mobile-actions"><button class="btn btn-xs btn-outline-success" data-action="start"><i class="fa-solid fa-play"></i></button><button class="btn btn-xs btn-outline-warning" data-action="pause"><i class="fa-solid fa-pause"></i></button><button class="btn btn-xs btn-outline-secondary" data-action="stop"><i class="fa-solid fa-stop"></i></button></div><div class="mobile-progress">${progress(t)}</div></div>`; }).join('') || (hasTorrentSnapshot ? `<div class="empty">No torrents.</div>` : loadingMarkup('Loading torrents...')); }
 function renderTable(){ updateBulkBar(); renderCounts(); renderLabelFilters(); updateSortHeaders(); buildVisibleRows(); renderMobile(); const body=$('torrentBody'); if(!visibleRows.length){ body.innerHTML=hasTorrentSnapshot?'<tr><td colspan="13" class="empty">No torrents for this filter.</td></tr>':loadingTableRow('Loading torrents...'); return; } const wrap=$('tableWrap'); const start=Math.max(0,Math.floor((wrap?.scrollTop||0)/ROW_HEIGHT)-OVERSCAN); const count=Math.ceil((wrap?.clientHeight||500)/ROW_HEIGHT)+OVERSCAN*2; const end=Math.min(visibleRows.length,start+count); const sig=`${renderVersion}:${start}:${end}:${visibleRows.length}:${sortState.key}:${sortState.dir}:${selected.size}:${activeFilter}:${$('searchBox')?.value||''}:${[...selected].slice(0,30).join(',')}`; if(sig===lastRenderSignature) return; lastRenderSignature=sig; const top=start*ROW_HEIGHT,bottom=Math.max(0,(visibleRows.length-end)*ROW_HEIGHT); body.innerHTML=(top?`<tr class="virtual-spacer"><td colspan="13" style="height:${top}px"></td></tr>`:'')+visibleRows.slice(start,end).map(renderRow).join('')+(bottom?`<tr class="virtual-spacer"><td colspan="13" style="height:${bottom}px"></td></tr>`:''); applyColumnVisibility(); }
@@ -211,7 +233,7 @@
 function setSelectionRange(hash, keepExisting=false){ const current=visibleRows.findIndex(t=>t.hash===hash); const last=visibleRows.findIndex(t=>t.hash===lastSelectedHash); if(current<0 || last<0){ selected.add(hash); lastSelectedHash=hash; return; } if(!keepExisting) selected.clear(); const a=Math.min(current,last), b=Math.max(current,last); visibleRows.slice(a,b+1).forEach(t=>selected.add(t.hash)); selectedHash=hash; }
 async function post(url,data,method='POST'){ const res=await fetch(url,{method,headers:{'Content-Type':'application/json'},body:JSON.stringify(data||{})}); const json=await res.json(); if(!json.ok) throw new Error(json.error||'Operation failed'); return json; }
 
-async function runAction(action, extra={}){ const hashes=selectedHashes(); if(!hashes.length) return toast('No torrents selected','warning'); let payload={hashes,...extra}; if(action==='move'){ openPathPicker('move'); return; } setBusy(true); try{ const j=await post(`/api/torrents/${action}`,payload); markTorrentOperation(hashes, action, j.job_id, 'queued'); if(action==='recheck'){ hashes.forEach(h=>{ const t=torrents.get(h); if(t) torrents.set(h,{...t,status:'Checking',hashing:1,message:'Force recheck queued'}); }); scheduleRender(true); } toast(`${action} queued`,'success'); if(action==='set_label') await loadLabels(); }catch(e){toast(e.message,'danger');} finally{setBusy(false);} }
+async function runAction(action, extra={}){ const hashes=selectedHashes(); if(!hashes.length) return toast('No torrents selected','warning'); let payload={hashes,...extra}; if(action==='move'){ openPathPicker('move'); return; } setBusy(true); try{ const j=await post(`/api/torrents/${action}`,payload); markQueuedJobs(j, hashes, action); if(action==='recheck'){ hashes.forEach(h=>{ const t=torrents.get(h); if(t) torrents.set(h,{...t,status:'Checking',hashing:1,message:'Force recheck queued'}); }); scheduleRender(true); } const parts=Number(j.bulk_parts||1); toast(parts>1?`${action} queued in ${parts} bulk parts`:`${action} queued`,'success'); if(action==='set_label') await loadLabels(); }catch(e){toast(e.message,'danger');} finally{setBusy(false);} }
 function flag(iso){ const code=String(iso||'').toLowerCase(); return code?`<span class="fi fi-${esc(code)}"></span> <span>${esc(code.toUpperCase())}</span>`:'-'; }
 function table(headers,rows){ return `<table class="table table-sm detail-table"><thead><tr>${headers.map(h=>`<th>${esc(h)}</th>`).join('')}</tr></thead><tbody>${rows.map(r=>`<tr>${r.map(c=>`<td>${c}</td>`).join('')}</tr>`).join('')}</tbody></table>`; }
 function renderGeneral(){ const t=torrents.get(selectedHash); const labels=t?labelNames(t.label).map(l=>`<span class="chip label-mini"><i class="fa-solid fa-tag"></i> ${esc(l)}</span>`).join(' '):''; $('detailPane').innerHTML=t?`<div class="general-grid"><div><b>Name</b><span>${esc(t.name)}</span></div><div><b>Hash</b><span>${esc(t.hash)}</span></div><div><b>Path</b><span>${esc(t.path)}</span></div><div><b>Size</b><span>${esc(t.size_h)}</span></div><div><b>Progress</b><span>${esc(t.progress)}%</span></div><div><b>Ratio</b><span>${esc(t.ratio)}</span></div><div><b>Downloaded</b><span>${esc(t.down_total_h)}</span></div><div><b>Uploaded</b><span>${esc(t.up_total_h)}</span></div><div><b>Labels</b><span>${labels||'<span class="text-muted">-</span>'}</span></div><div><b>Ratio group</b><span>${esc(t.ratio_group||'')}</span></div></div>`:'Select a torrent.'; }
@@ -305,16 +327,57 @@
 async function applyDefaultDownloadPath(force=false){ const p=await getDefaultDownloadPath(); ['addPath','rssPath','autoEffectPath'].forEach(id=>{ const el=$(id); if(el && (force || !el.value)) el.value=p; }); return p; }
 async function openPathPicker(target){ pathTarget=target; const def=await getDefaultDownloadPath(); const initial=def || ($(target)?.value||'/'); $('moveOptions')?.classList.toggle('d-none', target!=='move'); if($('moveDataPhysical')) $('moveDataPhysical').checked=true; if($('moveRecheck')) $('moveRecheck').checked=true; new bootstrap.Modal($('pathModal')).show(); browsePath(initial); }
 async function browsePath(path){ $('pathList').innerHTML='<span class="spinner-border spinner-border-sm"></span> Loading...'; try{ const res=await fetch(`/api/path/browse?path=${encodeURIComponent(path||'/')}`); const j=await res.json(); if(!j.ok) throw new Error(j.error); $('pathCurrent').value=j.path; lastPathParent=j.parent; $('pathList').innerHTML=j.dirs.map(d=>`<div class="path-row" data-path="${esc(d.path)}"><i class="fa-solid fa-folder"></i><span>${esc(d.name)}</span></div>`).join('')||'<div class="p-3 text-muted">No directories.</div>'; }catch(e){$('pathList').innerHTML=`<div class="text-danger p-2">${esc(e.message)}</div>`;} }
-$('pathList')?.addEventListener('click',e=>{const r=e.target.closest('.path-row'); if(r) browsePath(r.dataset.path);}); $('pathGoBtn')?.addEventListener('click',()=>browsePath($('pathCurrent').value)); $('pathUpBtn')?.addEventListener('click',()=>browsePath(lastPathParent)); $('pathReloadBtn')?.addEventListener('click',()=>browsePath($('pathCurrent').value)); $('pathSelectBtn')?.addEventListener('click',async()=>{const p=$('pathCurrent').value; if(pathTarget==='move'){ const hashes=selectedHashes(); const j=await post('/api/torrents/move',{hashes,path:p,move_data:!!($('moveDataPhysical')?.checked),recheck:!!($('moveRecheck')?.checked)}); markTorrentOperation(hashes,'move',j.job_id,'queued'); toast($('moveDataPhysical')?.checked?'physical move queued':'move queued','success'); } else if($(pathTarget)) $(pathTarget).value=p; bootstrap.Modal.getInstance($('pathModal'))?.hide();}); document.querySelectorAll('.browse-path').forEach(b=>b.addEventListener('click',()=>openPathPicker(b.dataset.target)));
+$('pathList')?.addEventListener('click',e=>{const r=e.target.closest('.path-row'); if(r) browsePath(r.dataset.path);}); $('pathGoBtn')?.addEventListener('click',()=>browsePath($('pathCurrent').value)); $('pathUpBtn')?.addEventListener('click',()=>browsePath(lastPathParent)); $('pathReloadBtn')?.addEventListener('click',()=>browsePath($('pathCurrent').value)); $('pathSelectBtn')?.addEventListener('click',async()=>{const p=$('pathCurrent').value; if(pathTarget==='move'){ const hashes=selectedHashes(); const j=await post('/api/torrents/move',{hashes,path:p,move_data:!!($('moveDataPhysical')?.checked),recheck:!!($('moveRecheck')?.checked)}); markQueuedJobs(j,hashes,'move'); const parts=Number(j.bulk_parts||1); toast(parts>1?`move queued in ${parts} bulk parts`:$('moveDataPhysical')?.checked?'physical move queued':'move queued','success'); } else if($(pathTarget)) $(pathTarget).value=p; bootstrap.Modal.getInstance($('pathModal'))?.hide();}); document.querySelectorAll('.browse-path').forEach(b=>b.addEventListener('click',()=>openPathPicker(b.dataset.target)));
 
 function renderColumnManager(){ const box=$('columnManager'); if(!box) return; box.innerHTML=COLUMN_DEFS.map(([key,label])=>`<label class="column-card form-check form-switch ${hiddenColumns.has(key)?'':'active'}"><input class="form-check-input column-toggle" type="checkbox" data-col-key="${esc(key)}" ${hiddenColumns.has(key)?'':'checked'}><span class="form-check-label"><i class="fa-solid fa-table-columns"></i> ${esc(label)}</span></label>`).join(''); }
 $('saveColumnsBtn')?.addEventListener('click',async()=>{ document.querySelectorAll('.column-toggle').forEach(cb=>cb.checked?hiddenColumns.delete(cb.dataset.colKey):hiddenColumns.add(cb.dataset.colKey)); applyColumnVisibility(); scheduleRender(true); await post('/api/preferences',{table_columns_json:JSON.stringify({hidden:[...hiddenColumns]})}).catch(e=>toast(e.message,'danger')); toast('Columns saved','success'); });
 $('resetColumnsBtn')?.addEventListener('click',async()=>{ hiddenColumns.clear(); renderColumnManager(); applyColumnVisibility(); scheduleRender(true); await post('/api/preferences',{table_columns_json:JSON.stringify({hidden:[]})}).catch(()=>{}); });
 
-async function loadJobs(page=jobsPage){ const box=$('jobsTable'); if(!box)return; jobsPage=Math.max(0,page|0); box.innerHTML='<span class="spinner-border spinner-border-sm"></span> Loading jobs...'; const offset=jobsPage*jobsLimit; const j=await (await fetch(`/api/jobs?limit=${jobsLimit}&offset=${offset}`)).json(); const rows=j.jobs||[]; jobsTotal=Number(j.total||rows.length); const details=r=>{ const count=Number(r.hash_count||0); if(r.is_bulk || count>1) return `<span class="badge text-bg-info">bulk</span><br><span class="text-muted">${esc(count)} torrent(s), details hidden</span>`; const bits=[]; if(count) bits.push(`${esc(count)} torrent`); if(r.summary) bits.push(esc(r.summary)); return bits.join('<br>') || '-'; }; box.innerHTML=table(['Status','Action','Profile','Count','Details','Attempts','Started','Finished','Error','Actions'],rows.map(r=>[`<span class="badge text-bg-${r.status==='done'?'success':r.status==='failed'?'danger':r.status==='running'?'primary':r.status==='cancelled'?'secondary':'warning'}">${esc(r.status)}</span>`,esc(r.action),esc(r.profile_id),esc(r.hash_count||0),details(r),esc(r.attempts||0),dateCell(r.started_at||r.created_at),dateCell(r.finished_at||r.updated_at),compactCell(r.error||'',140),`<button class="btn btn-xs btn-outline-primary job-retry" data-id="${esc(r.id)}"><i class="fa-solid fa-rotate-left"></i> retry</button> <button class="btn btn-xs btn-outline-danger job-cancel" data-id="${esc(r.id)}"><i class="fa-solid fa-ban"></i> cancel</button>`])); renderJobsPager(); }
+function jobActions(r){ const id=esc(r.id); const status=String(r.status||''); const actions=[]; if(status==='failed'||status==='cancelled') actions.push(`<button class="btn btn-xs btn-outline-primary job-retry" data-id="${id}"><i class="fa-solid fa-rotate-left"></i> retry</button>`); if(status==='pending'||status==='running') actions.push(`<button class="btn btn-xs btn-outline-danger job-cancel" data-id="${id}" data-status="${esc(status)}"><i class="fa-solid fa-triangle-exclamation"></i> emergency cancel</button>`); return actions.join(' ') || '<span class="text-muted">-</span>'; }
+function jobStatusBadgeClass(status){
+  // Note: The running status means active work, so it uses primary instead of danger; danger is kept for failed only.
+  const classes={done:'success',failed:'danger',running:'primary',cancelled:'secondary',pending:'warning'};
+  return classes[String(status||'')] || 'warning';
+}
+async function loadJobs(page=jobsPage){
+  const box=$('jobsTable');
+  // Note: Finished shows only a real finished_at; running/pending rows do not get a date from updated_at.
+  if(!box) return;
+  jobsPage=Math.max(0,page|0);
+  box.innerHTML='<span class="spinner-border spinner-border-sm"></span> Loading jobs...';
+  const offset=jobsPage*jobsLimit;
+  const j=await (await fetch(`/api/jobs?limit=${jobsLimit}&offset=${offset}`)).json();
+  const rows=j.jobs||[];
+  jobsTotal=Number(j.total||rows.length);
+  const details=r=>{
+    const count=Number(r.hash_count||0);
+    if(r.is_bulk || count>1) return `<span class="badge text-bg-info">bulk</span><br><span class="text-muted">${esc(count)} torrent(s), details hidden</span>`;
+    const bits=[];
+    if(count) bits.push(`${esc(count)} torrent`);
+    if(r.summary) bits.push(esc(r.summary));
+    return bits.join('<br>') || '-';
+  };
+  box.innerHTML=table(
+    ['Status','Action','Profile','Count','Details','Attempts','Started','Finished','Error','Actions'],
+    rows.map(r=>[
+      `<span class="badge text-bg-${jobStatusBadgeClass(r.status)}">${esc(r.status)}</span>`,
+      esc(r.action),
+      esc(r.profile_id),
+      esc(r.hash_count||0),
+      details(r),
+      esc(r.attempts||0),
+      dateCell(r.started_at||r.created_at),
+      dateCell(r.finished_at),
+      compactCell(r.error||'',140),
+      jobActions(r),
+    ])
+  );
+  renderJobsPager();
+}
 function renderJobsPager(){ const p=$('jobsPager'); if(!p)return; const pages=Math.max(1,Math.ceil(jobsTotal/jobsLimit)); p.innerHTML=`<div class="d-flex align-items-center gap-2 flex-wrap"><button class="btn btn-sm btn-outline-secondary" id="jobsPrev" ${jobsPage<=0?'disabled':''}><i class="fa-solid fa-chevron-left"></i> Prev</button><span class="small text-muted">Page ${jobsPage+1} / ${pages} · ${jobsTotal} jobs</span><button class="btn btn-sm btn-outline-secondary" id="jobsNext" ${jobsPage>=pages-1?'disabled':''}>Next <i class="fa-solid fa-chevron-right"></i></button></div>`; $('jobsPrev')?.addEventListener('click',()=>loadJobs(jobsPage-1)); $('jobsNext')?.addEventListener('click',()=>loadJobs(jobsPage+1)); }
-$('jobsModal')?.addEventListener('show.bs.modal',loadJobs); $('refreshJobsBtn')?.addEventListener('click',loadJobs); $('jobsTable')?.addEventListener('click',async e=>{ const btn=e.target.closest('.job-retry,.job-cancel'); if(!btn)return; const id=btn.dataset.id; if(!id)return; if(btn.classList.contains('job-retry')) await post(`/api/jobs/${id}/retry`,{}).catch(x=>toast(x.message,'danger')); if(btn.classList.contains('job-cancel')) await post(`/api/jobs/${id}/cancel`,{}).catch(x=>toast(x.message,'danger')); loadJobs(); });
-$('clearJobsBtn')?.addEventListener('click',async()=>{ if(!confirm('Clear finished job logs? Pending and running jobs will stay.')) return; try{ const j=await post('/api/jobs/clear',{}); toast(`Cleared ${j.deleted||0} job log(s)`,'success'); jobsPage=0; loadJobs(0); }catch(e){ toast(e.message,'danger'); } });
+// Note: The buttons in the job log depend on status: failed gets retry, and emergency cancel applies only to pending/running.
+$('jobsModal')?.addEventListener('show.bs.modal',loadJobs); $('refreshJobsBtn')?.addEventListener('click',loadJobs); $('jobsTable')?.addEventListener('click',async e=>{ const btn=e.target.closest('.job-retry,.job-cancel'); if(!btn)return; const id=btn.dataset.id; if(!id)return; if(btn.classList.contains('job-retry')) await post(`/api/jobs/${id}/retry`,{}).catch(x=>toast(x.message,'danger')); if(btn.classList.contains('job-cancel')){ const st=btn.dataset.status||''; if((st==='pending'||st==='running') && !confirm('Emergency cancel this unfinished job?')) return; await post(`/api/jobs/${id}/cancel`,{}).catch(x=>toast(x.message,'danger')); } loadJobs(); });
+$('clearJobsBtn')?.addEventListener('click',async()=>{ const emergency=confirm('Emergency clear all job logs, including unfinished jobs? OK = emergency clear, Cancel = clear only finished logs.'); if(!emergency && !confirm('Clear finished job logs? Pending and running jobs will stay.')) return; try{ const j=await post(`/api/jobs/clear${emergency?'?force=1':''}`,{}); toast(`${emergency?'Emergency cleared':'Cleared'} ${j.deleted||0} job log(s)`,'success'); jobsPage=0; loadJobs(0); }catch(e){ toast(e.message,'danger'); } });
 
 async function loadLabels(){ const j=await (await fetch('/api/labels')).json(); const labels=j.labels||[]; knownLabels=labels; renderLabelFilters(); renderLabelChooser(); if($('labelsManager')) $('labelsManager').innerHTML=labels.length?labels.map(l=>`<div class="label-manager-row"><span class="chip"><i class="fa-solid fa-tag"></i> ${esc(l.name)}</span><button class="btn btn-xs btn-outline-danger delete-label" data-id="${esc(l.id)}" title="Delete label"><i class="fa-solid fa-trash"></i></button></div>`).join(''):'<span class="text-muted">No labels.</span>'; }
 function renderLabelChooser(){ if($('selectedLabelList')) $('selectedLabelList').innerHTML=[...modalLabels].map(l=>`<button class="chip label-selected" data-label="${esc(l)}" title="Remove"><i class="fa-solid fa-tag"></i> ${esc(l)} <i class="fa-solid fa-xmark ms-1"></i></button>`).join('') || '<span class="text-muted small">No labels selected.</span>'; if($('labelList')) $('labelList').innerHTML=knownLabels.map(l=>`<button class="chip label-chip ${modalLabels.has(l.name)?'active':''}" data-label="${esc(l.name)}"><i class="fa-solid fa-tag"></i> ${esc(l.name)}</button>`).join('') || '<span class="text-muted small">No saved labels.</span>'; }
@@ -471,7 +534,7 @@
     cleanupCountCard('Smart Queue logs', data.smart_queue_history_total, `retention ${retention.smart_queue_history||'-'} days`),
     cleanupCountCard('Database size', db.size_h||db.size||'-', db.path||'')
   ];
-  box.innerHTML=`<div class="cleanup-grid">${cards.join('')}</div><div class="cleanup-actions mt-3"><button id="cleanupJobsBtn" class="btn btn-sm btn-outline-danger"><i class="fa-solid fa-trash"></i> Clear job logs</button><button id="cleanupSmartQueueBtn" class="btn btn-sm btn-outline-danger"><i class="fa-solid fa-trash"></i> Clear Smart Queue logs</button><button id="cleanupAllBtn" class="btn btn-sm btn-danger"><i class="fa-solid fa-broom"></i> Clear both</button><button id="cleanupRefreshBtn" class="btn btn-sm btn-outline-secondary"><i class="fa-solid fa-rotate"></i> Refresh</button></div><div class="tool-note mt-2">Job cleanup uses the existing job endpoint logic, so pending and running jobs are preserved.</div>`;
+  box.innerHTML=`<div class="cleanup-grid">${cards.join('')}</div><div class="cleanup-actions mt-3"><button id="cleanupJobsBtn" class="btn btn-sm btn-outline-danger"><i class="fa-solid fa-trash"></i> Clear job logs</button><button id="cleanupSmartQueueBtn" class="btn btn-sm btn-outline-danger"><i class="fa-solid fa-trash"></i> Clear Smart Queue logs</button><button id="cleanupAllBtn" class="btn btn-sm btn-danger"><i class="fa-solid fa-broom"></i> Clear both</button><button id="cleanupRefreshBtn" class="btn btn-sm btn-outline-secondary"><i class="fa-solid fa-rotate"></i> Refresh</button></div><div class="tool-note mt-2">Job cleanup preserves pending and running jobs. Use Jobs modal for emergency clear when unfinished jobs must be removed.</div>`;
 }
 async function loadCleanup(){
   const box=$('cleanupManager'); if(!box) return;
File diff suppressed because it is too large
@@ -54,6 +54,7 @@
 <button class="filter" data-filter="checking"><span><i class="fa-solid fa-rotate me-1"></i>Checking</span> <span id="countChecking">0</span></button>
 <button class="filter" data-filter="error"><span><i class="fa-solid fa-triangle-exclamation me-1"></i>With error</span> <span id="countError">0</span></button>
 <button class="filter" data-filter="stopped"><span><i class="fa-solid fa-stop me-1"></i>Stopped</span> <span id="countStopped">0</span></button>
+<button class="filter d-none" data-filter="moving"><span><i class="fa-solid fa-folder-open me-1"></i>Moving</span> <span id="countMoving">0</span></button>
 <div id="labelFilters" class="label-filters mt-2"></div>
 <hr>
 <div class="small text-muted px-2">Shortcuts</div>
@@ -4,3 +4,4 @@ python-dotenv>=1.0
 geoip2>=4.8
 psutil>=5.9
 simple-websocket>=1.0
+gunicorn>=22.0
@@ -1,16 +1,25 @@
 [Unit]
-Description=pyTorrent web UI for rTorrent
-After=network.target
+Description=pyTorrent Web UI
+After=network-online.target
+Wants=network-online.target
 
 [Service]
 Type=simple
-WorkingDirectory=/opt/pytorrent
-EnvironmentFile=/opt/pytorrent/.env
-ExecStart=/opt/pytorrent/venv/bin/python /opt/pytorrent/app.py
-User=www-data
-Group=www-data
+#User=root
+#Group=root
+User=pytorrent
+Group=pytorrent
+WorkingDirectory=/opt/pyTorrent
+Environment="PYTHONUNBUFFERED=1"
+EnvironmentFile=/opt/pyTorrent/.env
+# Note: threaded Gunicorn preserves Flask-SocketIO background tasks without running Werkzeug in production.
+ExecStart=/opt/pyTorrent/venv/bin/gunicorn --worker-class gthread --workers 1 --threads 32 --bind ${PYTORRENT_HOST}:${PYTORRENT_PORT} --access-logfile - --error-logfile - wsgi:app
 Restart=always
 RestartSec=3
+KillSignal=SIGINT
+TimeoutStopSec=20
+NoNewPrivileges=true
+PrivateTmp=true
 
 [Install]
 WantedBy=multi-user.target