add auth support
This commit is contained in:
344
pytorrent/services/auth.py
Normal file
344
pytorrent/services/auth.py
Normal file
@@ -0,0 +1,344 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from functools import wraps
|
||||
from typing import Any
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from flask import abort, jsonify, redirect, request, session, url_for
|
||||
from werkzeug.security import check_password_hash, generate_password_hash
|
||||
|
||||
from ..config import AUTH_ENABLE
|
||||
from ..db import connect, default_user_id, utcnow
|
||||
|
||||
# Endpoints reachable without an authenticated session (login/logout flows,
# the auth API itself and static assets).
PUBLIC_ENDPOINTS = {"main.login", "main.logout", "api.auth_login", "api.auth_me", "static"}

# API path prefixes whose unsafe methods mutate rTorrent state and therefore
# require "full" access on the targeted profile.
RTORRENT_WRITE_PREFIXES = (
    "/api/torrents/",
    "/api/speed/limits",
    "/api/labels",
    "/api/ratio-groups",
    "/api/rss",
    "/api/smart-queue",
    "/api/automations",
    "/api/jobs",
)

# Prefixes for rTorrent configuration writes (also gated on "full" access).
RTORRENT_CONFIG_PREFIXES = ("/api/rtorrent-config",)

# Prefixes reserved for admin users.
ADMIN_PREFIXES = ("/api/auth/users", "/api/profiles")

# Note: API reads that expose rTorrent/profile data must also respect profile permissions.
PROFILE_READ_PREFIXES = (
    "/api/torrents",
    "/api/torrent-stats",
    "/api/system/status",
    "/api/app/status",
    "/api/port-check",
    "/api/path",
    "/api/labels",
    "/api/ratio-groups",
    "/api/rss",
    "/api/rtorrent-config",
    "/api/smart-queue",
    "/api/traffic/history",
    "/api/automations",
)
|
||||
|
||||
|
||||
def enabled() -> bool:
    """Tell whether authentication is switched on via the AUTH_ENABLE config flag."""
    return True if AUTH_ENABLE else False
|
||||
|
||||
|
||||
def password_hash(password: str) -> str:
    """Hash *password* with Werkzeug; a falsy value is hashed as the empty string."""
    return generate_password_hash(password if password else "")
|
||||
|
||||
|
||||
def current_user_id() -> int:
    """Return the session's user id, the default user id when auth is off, or 0.

    0 signals "no authenticated user"; any session garbage is treated the same.
    """
    if not enabled():
        return default_user_id()
    raw = session.get("user_id")
    try:
        return int(raw or 0)
    except Exception:
        # Malformed session value -> behave as unauthenticated.
        return 0
|
||||
|
||||
|
||||
def current_user() -> dict[str, Any] | None:
    """Fetch the logged-in user's public columns, or None when unauthenticated."""
    user_id = current_user_id()
    if not user_id:
        return None
    query = "SELECT id, username, role, is_active, created_at, updated_at FROM users WHERE id=?"
    with connect() as conn:
        return conn.execute(query, (user_id,)).fetchone()
|
||||
|
||||
|
||||
def is_admin(user: dict[str, Any] | None = None) -> bool:
    """True when auth is disabled, or when *user* (default: current user) is an active admin."""
    if not enabled():
        return True
    candidate = user or current_user()
    if not candidate:
        return False
    active = int(candidate.get("is_active") or 0)
    return candidate.get("role") == "admin" and active != 0
|
||||
|
||||
|
||||
def _permissions(user_id: int | None = None) -> list[dict[str, Any]]:
    """List profile permission rows for *user_id* (default: current user).

    With auth disabled a single synthetic wildcard full-access grant is returned.
    """
    if not enabled():
        return [{"profile_id": 0, "access_level": "full"}]
    uid = user_id or current_user_id()
    if not uid:
        return []
    query = "SELECT profile_id, access_level FROM user_profile_permissions WHERE user_id=?"
    with connect() as conn:
        return conn.execute(query, (uid,)).fetchall()
|
||||
|
||||
|
||||
def can_access_profile(profile_id: int | None, user_id: int | None = None) -> bool:
    """Return True when *user_id* (default: current) may read profile *profile_id*.

    Active admins can read everything; other active users need a permission row
    for the profile itself or the wildcard profile id 0.
    """
    if not enabled():
        return True
    uid = user_id or current_user_id()
    if not uid:
        return False
    with connect() as conn:
        account = conn.execute("SELECT role, is_active FROM users WHERE id=?", (uid,)).fetchone()
        if not account or not int(account.get("is_active") or 0):
            return False
        if account.get("role") == "admin":
            return True
        target = int(profile_id or 0)
        grant = conn.execute(
            "SELECT 1 FROM user_profile_permissions WHERE user_id=? AND (profile_id=0 OR profile_id=?) LIMIT 1",
            (uid, target),
        ).fetchone()
    return bool(grant)
|
||||
|
||||
|
||||
def can_write_profile(profile_id: int | None, user_id: int | None = None) -> bool:
    """Return True when *user_id* (default: current) may modify profile *profile_id*.

    Active admins may write anywhere; other users need a 'full' grant on the
    profile or on the wildcard profile id 0.
    """
    if not enabled():
        return True
    uid = user_id or current_user_id()
    if not uid:
        return False
    with connect() as conn:
        account = conn.execute("SELECT role, is_active FROM users WHERE id=?", (uid,)).fetchone()
        if not account or not int(account.get("is_active") or 0):
            return False
        if account.get("role") == "admin":
            return True
        target = int(profile_id or 0)
        # ORDER BY profile_id DESC prefers the profile-specific grant over the
        # wildcard (profile_id=0) grant when both exist.
        grant = conn.execute(
            "SELECT access_level FROM user_profile_permissions WHERE user_id=? AND (profile_id=0 OR profile_id=?) ORDER BY profile_id DESC LIMIT 1",
            (uid, target),
        ).fetchone()
    return bool(grant and grant.get("access_level") == "full")
|
||||
|
||||
|
||||
def visible_profile_ids(user_id: int | None = None) -> set[int] | None:
    """Return the set of profile ids *user_id* may read.

    None means "unrestricted" (auth disabled, admin, or a wildcard grant);
    an empty set means no visible profiles at all.
    """
    if not enabled():
        return None
    uid = user_id or current_user_id()
    if not uid:
        return set()
    with connect() as conn:
        account = conn.execute("SELECT role, is_active FROM users WHERE id=?", (uid,)).fetchone()
        if not account or not int(account.get("is_active") or 0):
            return set()
        if account.get("role") == "admin":
            return None
        grants = conn.execute("SELECT profile_id FROM user_profile_permissions WHERE user_id=?", (uid,)).fetchall()
    ids = {int(grant.get("profile_id") or 0) for grant in grants}
    # A wildcard grant (profile_id 0) unlocks every profile.
    return None if 0 in ids else ids
|
||||
|
||||
|
||||
|
||||
def same_origin_request() -> bool:
    """Return False only when an unsafe request clearly comes from another origin."""
    source = request.headers.get("Origin") or request.headers.get("Referer")
    if not source:
        # No Origin/Referer header at all: assume same-origin rather than
        # break clients that legitimately omit them.
        return True
    try:
        parts = urlparse(source)
    except Exception:
        return False
    return parts.scheme == request.scheme and parts.netloc == request.host
|
||||
|
||||
|
||||
def writable_profile_ids(user_id: int | None = None) -> set[int] | None:
    """Return the set of profile ids *user_id* may write (mirror of
    visible_profile_ids, restricted to 'full' grants).

    None means "unrestricted"; an empty set means read-only everywhere.
    """
    if not enabled():
        return None
    uid = user_id or current_user_id()
    if not uid:
        return set()
    with connect() as conn:
        account = conn.execute("SELECT role, is_active FROM users WHERE id=?", (uid,)).fetchone()
        if not account or not int(account.get("is_active") or 0):
            return set()
        if account.get("role") == "admin":
            return None
        grants = conn.execute("SELECT profile_id FROM user_profile_permissions WHERE user_id=? AND access_level='full'", (uid,)).fetchall()
    ids = {int(grant.get("profile_id") or 0) for grant in grants}
    # A wildcard 'full' grant (profile_id 0) unlocks writes on every profile.
    return None if 0 in ids else ids
|
||||
|
||||
def require_admin() -> None:
    """Abort with 403 unless the current user is an admin (no-op when auth is off)."""
    if not enabled():
        return
    if not is_admin():
        abort(403)
|
||||
|
||||
|
||||
def require_profile_read(profile_id: int | None) -> None:
    """Abort with 403 unless the current user may read *profile_id* (no-op when auth is off)."""
    if not enabled():
        return
    if not can_access_profile(profile_id):
        abort(403)
|
||||
|
||||
|
||||
def require_profile_write(profile_id: int | None) -> None:
    """Abort with 403 unless the current user may write *profile_id* (no-op when auth is off)."""
    if not enabled():
        return
    if not can_write_profile(profile_id):
        abort(403)
|
||||
|
||||
|
||||
def login_user(username: str, password: str) -> dict[str, Any] | None:
    """Validate credentials, start a fresh session and return the user row.

    Returns None on unknown user, inactive account or wrong password. With
    auth disabled a synthetic admin identity is returned without any check.
    """
    if not enabled():
        return {"id": default_user_id(), "username": "default", "role": "admin", "is_active": 1}
    with connect() as conn:
        account = conn.execute("SELECT * FROM users WHERE username=?", (username.strip(),)).fetchone()
    if not account or not int(account.get("is_active") or 0):
        return None
    stored_hash = account.get("password_hash")
    # NOTE(review): a missing hash short-circuits before check_password_hash,
    # leaking a small timing difference between unknown and known users.
    if not stored_hash or not check_password_hash(stored_hash, password or ""):
        return None
    # Rotate the session on login to avoid fixation of pre-auth session data.
    session.clear()
    session["user_id"] = int(account["id"])
    session["username"] = account["username"]
    session["role"] = account.get("role") or "user"
    return current_user()
|
||||
|
||||
|
||||
def logout_user() -> None:
    """Drop the whole session, logging the current user out."""
    session.clear()
|
||||
|
||||
|
||||
def ensure_admin_user() -> None:
    """Create or repair the built-in 'admin' account when auth is enabled.

    NOTE(review): a freshly created admin gets the default password "admin";
    deployments should force a password change on first login — confirm this
    is documented for operators.
    """
    if not enabled():
        return
    now = utcnow()
    with connect() as conn:
        existing = conn.execute("SELECT id FROM users WHERE username='admin'").fetchone()
        if existing:
            # Re-promote and re-activate in case the row was demoted or disabled.
            conn.execute("UPDATE users SET role='admin', is_active=1, updated_at=? WHERE username='admin'", (now,))
        else:
            conn.execute(
                "INSERT INTO users(username,password_hash,role,is_active,created_at,updated_at) VALUES(?,?,?,?,?,?)",
                ("admin", password_hash("admin"), "admin", 1, now, now),
            )
|
||||
|
||||
|
||||
def list_users() -> list[dict[str, Any]]:
    """Return all users (admin only), each with its profile permission list attached."""
    require_admin()
    with connect() as conn:
        users = conn.execute(
            "SELECT id, username, role, is_active, created_at, updated_at FROM users ORDER BY username COLLATE NOCASE"
        ).fetchall()
        perms = conn.execute(
            "SELECT user_id, profile_id, access_level FROM user_profile_permissions ORDER BY user_id, profile_id"
        ).fetchall()
    grouped: dict[int, list[dict[str, Any]]] = {}
    for perm in perms:
        entry = {
            "profile_id": int(perm.get("profile_id") or 0),
            "access_level": perm.get("access_level") or "ro",
        }
        grouped.setdefault(int(perm["user_id"]), []).append(entry)
    for user in users:
        user["permissions"] = grouped.get(int(user["id"]), [])
    return users
|
||||
|
||||
|
||||
def save_user(data: dict[str, Any], user_id: int | None = None) -> dict[str, Any]:
    """Create (user_id None) or update a user plus its profile permissions; admin only.

    Returns the stored user row. Raises ValueError on a missing username or an
    unknown user_id. NOTE(review): a new user created without a password gets
    the username as its initial password — confirm this is intended.
    """
    require_admin()
    now = utcnow()
    username = str(data.get("username") or "").strip()
    role = "admin" if data.get("role") == "admin" else "user"
    is_active = 1 if data.get("is_active", True) else 0
    if not username:
        raise ValueError("Username is required")
    with connect() as conn:
        if user_id:
            if not conn.execute("SELECT id FROM users WHERE id=?", (user_id,)).fetchone():
                raise ValueError("User does not exist")
            conn.execute(
                "UPDATE users SET username=?, role=?, is_active=?, updated_at=? WHERE id=?",
                (username, role, is_active, now, user_id),
            )
        else:
            initial_password = str(data.get("password") or username)
            cur = conn.execute(
                "INSERT INTO users(username,password_hash,role,is_active,created_at,updated_at) VALUES(?,?,?,?,?,?)",
                (username, password_hash(initial_password), role, is_active, now, now),
            )
            user_id = int(cur.lastrowid)
        if data.get("password"):
            conn.execute(
                "UPDATE users SET password_hash=?, updated_at=? WHERE id=?",
                (password_hash(str(data.get("password"))), now, user_id),
            )
        # Permissions are rebuilt from scratch; admins implicitly see every
        # profile, so explicit grants are dropped for them.
        conn.execute("DELETE FROM user_profile_permissions WHERE user_id=?", (user_id,))
        if role != "admin":
            for item in data.get("permissions") or []:
                grant_profile = int(item.get("profile_id") or 0)
                grant_level = "full" if item.get("access_level") == "full" else "ro"
                conn.execute(
                    "INSERT OR REPLACE INTO user_profile_permissions(user_id,profile_id,access_level,created_at,updated_at) VALUES(?,?,?,?,?)",
                    (user_id, grant_profile, grant_level, now, now),
                )
        return conn.execute("SELECT id, username, role, is_active, created_at, updated_at FROM users WHERE id=?", (user_id,)).fetchone()
|
||||
|
||||
|
||||
def delete_user(user_id: int) -> None:
    """Delete a user and its permission rows (admin only).

    Raises ValueError when targeting the logged-in user. The built-in 'admin'
    row itself is never deleted (guarded in the SQL).
    """
    require_admin()
    if int(user_id) == current_user_id():
        raise ValueError("Cannot delete current user")
    with connect() as conn:
        conn.execute("DELETE FROM user_profile_permissions WHERE user_id=?", (user_id,))
        conn.execute("DELETE FROM users WHERE id=? AND username <> 'admin'", (user_id,))
|
||||
|
||||
|
||||
def install_guards(app) -> None:
    """Register a before_request hook that enforces authentication,
    same-origin checks for unsafe API methods, and per-profile authorization.

    The hook returns None to let the request through, or a Flask response
    (JSON error or redirect) to block it.
    """

    @app.before_request
    def _auth_guard():
        if not enabled():
            return None
        endpoint = request.endpoint or ""
        if endpoint in PUBLIC_ENDPOINTS or endpoint.startswith("static"):
            return None
        if not current_user_id():
            if request.path.startswith("/api/"):
                return jsonify({"ok": False, "error": "Authentication required"}), 401
            return redirect(url_for("main.login", next=request.full_path if request.query_string else request.path))
        user = current_user()
        if not user or not int(user.get("is_active") or 0):
            # Session points at a deleted/deactivated account: drop it.
            logout_user()
            # BUG FIX: the original single-line return bound the conditional to
            # the status code only -- `(jsonify(...), 401 if api else redirect(...))`
            # -- so non-API requests returned a (response, redirect-object)
            # tuple instead of a redirect, crashing the response handling.
            if request.path.startswith("/api/"):
                return jsonify({"ok": False, "error": "Authentication required"}), 401
            return redirect(url_for("main.login"))
        if request.path.startswith("/api/auth/users") and not is_admin(user):
            return jsonify({"ok": False, "error": "Admin only"}), 403
        if request.path.startswith(PROFILE_READ_PREFIXES):
            profile_id = _request_profile_id()
            if profile_id and not can_access_profile(profile_id):
                return jsonify({"ok": False, "error": "Profile access denied"}), 403
        if request.method not in {"GET", "HEAD", "OPTIONS"}:
            # Unsafe methods: block clear cross-origin API calls (CSRF guard).
            if request.path.startswith("/api/") and not same_origin_request():
                return jsonify({"ok": False, "error": "Cross-origin API request blocked"}), 403
            # Profile management is admin-only, except activating a profile.
            if request.path.startswith("/api/profiles") and not request.path.endswith("/activate") and not is_admin(user):
                return jsonify({"ok": False, "error": "Admin only"}), 403
            profile_id = _request_profile_id()
            if request.path.startswith(RTORRENT_CONFIG_PREFIXES) and not can_write_profile(profile_id):
                return jsonify({"ok": False, "error": "Read-only profile access"}), 403
            if request.path.startswith(RTORRENT_WRITE_PREFIXES) and not can_write_profile(profile_id):
                return jsonify({"ok": False, "error": "Read-only profile access"}), 403
        return None
|
||||
|
||||
|
||||
def _request_profile_id() -> int | None:
    """Best-effort extraction of the profile id targeted by the current request.

    Checks the URL view args first, then the JSON body, then falls back to
    the user's active profile (None when there is none).
    """
    view_args = request.view_args or {}
    if view_args.get("profile_id"):
        return int(view_args["profile_id"])
    try:
        body = request.get_json(silent=True) or {}
        if body.get("profile_id"):
            return int(body.get("profile_id"))
    except Exception:
        pass
    # Imported lazily to avoid a circular import with the preferences service.
    from . import preferences

    active = preferences.active_profile()
    return int(active["id"]) if active else None
|
||||
@@ -3,6 +3,7 @@ from __future__ import annotations
|
||||
import json
|
||||
|
||||
from ..db import connect, utcnow, default_user_id
|
||||
from . import auth
|
||||
|
||||
BOOTSTRAP_THEMES = {
|
||||
"default": "Default Bootstrap",
|
||||
@@ -34,43 +35,44 @@ def bootstrap_css_url(theme: str | None) -> str:
|
||||
|
||||
|
||||
def list_profiles(user_id: int | None = None):
|
||||
user_id = user_id or default_user_id()
|
||||
user_id = user_id or auth.current_user_id() or default_user_id()
|
||||
visible = auth.visible_profile_ids(user_id)
|
||||
with connect() as conn:
|
||||
if visible is None:
|
||||
return conn.execute(
|
||||
"SELECT * FROM rtorrent_profiles ORDER BY is_default DESC, name COLLATE NOCASE"
|
||||
).fetchall()
|
||||
if not visible:
|
||||
return []
|
||||
placeholders = ",".join("?" for _ in visible)
|
||||
return conn.execute(
|
||||
"SELECT * FROM rtorrent_profiles WHERE user_id=? ORDER BY is_default DESC, name COLLATE NOCASE",
|
||||
(user_id,),
|
||||
f"SELECT * FROM rtorrent_profiles WHERE id IN ({placeholders}) ORDER BY is_default DESC, name COLLATE NOCASE",
|
||||
tuple(visible),
|
||||
).fetchall()
|
||||
|
||||
|
||||
def get_profile(profile_id: int, user_id: int | None = None):
|
||||
user_id = user_id or default_user_id()
|
||||
user_id = user_id or auth.current_user_id() or default_user_id()
|
||||
if not auth.can_access_profile(profile_id, user_id):
|
||||
return None
|
||||
with connect() as conn:
|
||||
return conn.execute(
|
||||
"SELECT * FROM rtorrent_profiles WHERE id=? AND user_id=?",
|
||||
(profile_id, user_id),
|
||||
).fetchone()
|
||||
return conn.execute("SELECT * FROM rtorrent_profiles WHERE id=?", (profile_id,)).fetchone()
|
||||
|
||||
|
||||
def active_profile(user_id: int | None = None):
|
||||
user_id = user_id or default_user_id()
|
||||
user_id = user_id or auth.current_user_id() or default_user_id()
|
||||
with connect() as conn:
|
||||
pref = conn.execute("SELECT active_rtorrent_id FROM user_preferences WHERE user_id=?", (user_id,)).fetchone()
|
||||
if pref and pref.get("active_rtorrent_id"):
|
||||
row = conn.execute(
|
||||
"SELECT * FROM rtorrent_profiles WHERE id=? AND user_id=?",
|
||||
(pref["active_rtorrent_id"], user_id),
|
||||
).fetchone()
|
||||
if pref and pref.get("active_rtorrent_id") and auth.can_access_profile(int(pref["active_rtorrent_id"]), user_id):
|
||||
row = conn.execute("SELECT * FROM rtorrent_profiles WHERE id=?", (pref["active_rtorrent_id"],)).fetchone()
|
||||
if row:
|
||||
return row
|
||||
row = conn.execute(
|
||||
"SELECT * FROM rtorrent_profiles WHERE user_id=? ORDER BY is_default DESC, id ASC LIMIT 1",
|
||||
(user_id,),
|
||||
).fetchone()
|
||||
return row
|
||||
profiles = list_profiles(user_id)
|
||||
return profiles[0] if profiles else None
|
||||
|
||||
|
||||
def save_profile(data: dict, user_id: int | None = None):
|
||||
user_id = user_id or default_user_id()
|
||||
user_id = user_id or auth.current_user_id() or default_user_id()
|
||||
now = utcnow()
|
||||
name = str(data.get("name") or "rTorrent").strip()
|
||||
scgi_url = str(data.get("scgi_url") or "").strip()
|
||||
@@ -79,7 +81,7 @@ def save_profile(data: dict, user_id: int | None = None):
|
||||
is_remote = 1 if data.get("is_remote") else 0
|
||||
is_default = 1 if data.get("is_default") else 0
|
||||
if not scgi_url.startswith("scgi://"):
|
||||
raise ValueError("SCGI URL musi zaczynać się od scgi://")
|
||||
raise ValueError("SCGI URL must start with scgi://")
|
||||
with connect() as conn:
|
||||
if is_default:
|
||||
conn.execute("UPDATE rtorrent_profiles SET is_default=0 WHERE user_id=?", (user_id,))
|
||||
@@ -94,11 +96,11 @@ def save_profile(data: dict, user_id: int | None = None):
|
||||
"UPDATE user_preferences SET active_rtorrent_id=?, updated_at=? WHERE user_id=?",
|
||||
(profile_id, now, user_id),
|
||||
)
|
||||
return conn.execute("SELECT * FROM rtorrent_profiles WHERE id=? AND user_id=?", (profile_id, user_id)).fetchone()
|
||||
return conn.execute("SELECT * FROM rtorrent_profiles WHERE id=?", (profile_id,)).fetchone()
|
||||
|
||||
|
||||
def update_profile(profile_id: int, data: dict, user_id: int | None = None):
|
||||
user_id = user_id or default_user_id()
|
||||
user_id = user_id or auth.current_user_id() or default_user_id()
|
||||
now = utcnow()
|
||||
name = str(data.get("name") or "rTorrent").strip()
|
||||
scgi_url = str(data.get("scgi_url") or "").strip()
|
||||
@@ -107,24 +109,25 @@ def update_profile(profile_id: int, data: dict, user_id: int | None = None):
|
||||
is_remote = 1 if data.get("is_remote") else 0
|
||||
is_default = 1 if data.get("is_default") else 0
|
||||
if not scgi_url.startswith("scgi://"):
|
||||
raise ValueError("SCGI URL musi zaczynać się od scgi://")
|
||||
raise ValueError("SCGI URL must start with scgi://")
|
||||
with connect() as conn:
|
||||
row = conn.execute("SELECT id FROM rtorrent_profiles WHERE id=? AND user_id=?", (profile_id, user_id)).fetchone()
|
||||
if not row:
|
||||
row = conn.execute("SELECT id FROM rtorrent_profiles WHERE id=?", (profile_id,)).fetchone()
|
||||
if not row or not auth.can_write_profile(profile_id, user_id):
|
||||
raise ValueError("Profil nie istnieje")
|
||||
if is_default:
|
||||
conn.execute("UPDATE rtorrent_profiles SET is_default=0 WHERE user_id=?", (user_id,))
|
||||
conn.execute(
|
||||
"UPDATE rtorrent_profiles SET name=?, scgi_url=?, is_default=?, timeout_seconds=?, max_parallel_jobs=?, is_remote=?, updated_at=? WHERE id=? AND user_id=?",
|
||||
(name, scgi_url, is_default, timeout, max_parallel, is_remote, now, profile_id, user_id),
|
||||
"UPDATE rtorrent_profiles SET name=?, scgi_url=?, is_default=?, timeout_seconds=?, max_parallel_jobs=?, is_remote=?, updated_at=? WHERE id=?",
|
||||
(name, scgi_url, is_default, timeout, max_parallel, is_remote, now, profile_id),
|
||||
)
|
||||
return conn.execute("SELECT * FROM rtorrent_profiles WHERE id=? AND user_id=?", (profile_id, user_id)).fetchone()
|
||||
return conn.execute("SELECT * FROM rtorrent_profiles WHERE id=?", (profile_id,)).fetchone()
|
||||
|
||||
|
||||
def delete_profile(profile_id: int, user_id: int | None = None):
|
||||
user_id = user_id or default_user_id()
|
||||
user_id = user_id or auth.current_user_id() or default_user_id()
|
||||
auth.require_profile_write(profile_id)
|
||||
with connect() as conn:
|
||||
conn.execute("DELETE FROM rtorrent_profiles WHERE id=? AND user_id=?", (profile_id, user_id))
|
||||
conn.execute("DELETE FROM rtorrent_profiles WHERE id=?", (profile_id,))
|
||||
active = active_profile(user_id)
|
||||
conn.execute(
|
||||
"UPDATE user_preferences SET active_rtorrent_id=?, updated_at=? WHERE user_id=?",
|
||||
@@ -133,10 +136,10 @@ def delete_profile(profile_id: int, user_id: int | None = None):
|
||||
|
||||
|
||||
def activate_profile(profile_id: int, user_id: int | None = None):
|
||||
user_id = user_id or default_user_id()
|
||||
user_id = user_id or auth.current_user_id() or default_user_id()
|
||||
with connect() as conn:
|
||||
row = conn.execute("SELECT id FROM rtorrent_profiles WHERE id=? AND user_id=?", (profile_id, user_id)).fetchone()
|
||||
if not row:
|
||||
row = conn.execute("SELECT id FROM rtorrent_profiles WHERE id=?", (profile_id,)).fetchone()
|
||||
if not row or not auth.can_access_profile(profile_id, user_id):
|
||||
raise ValueError("Profil nie istnieje")
|
||||
conn.execute(
|
||||
"UPDATE user_preferences SET active_rtorrent_id=?, updated_at=? WHERE user_id=?",
|
||||
@@ -146,13 +149,18 @@ def activate_profile(profile_id: int, user_id: int | None = None):
|
||||
|
||||
|
||||
def get_preferences(user_id: int | None = None):
|
||||
user_id = user_id or default_user_id()
|
||||
user_id = user_id or auth.current_user_id() or default_user_id()
|
||||
with connect() as conn:
|
||||
return conn.execute("SELECT * FROM user_preferences WHERE user_id=?", (user_id,)).fetchone()
|
||||
pref = conn.execute("SELECT * FROM user_preferences WHERE user_id=?", (user_id,)).fetchone()
|
||||
if not pref:
|
||||
now = utcnow()
|
||||
conn.execute("INSERT INTO user_preferences(user_id, theme, created_at, updated_at) VALUES(?, 'dark', ?, ?)", (user_id, now, now))
|
||||
pref = conn.execute("SELECT * FROM user_preferences WHERE user_id=?", (user_id,)).fetchone()
|
||||
return pref
|
||||
|
||||
|
||||
def save_preferences(data: dict, user_id: int | None = None):
|
||||
user_id = user_id or default_user_id()
|
||||
user_id = user_id or auth.current_user_id() or default_user_id()
|
||||
allowed_theme = data.get("theme") if data.get("theme") in {"light", "dark"} else None
|
||||
bootstrap_theme = data.get("bootstrap_theme") if data.get("bootstrap_theme") in BOOTSTRAP_THEMES else None
|
||||
font_family = data.get("font_family") if data.get("font_family") in FONT_FAMILIES else None
|
||||
|
||||
@@ -85,7 +85,7 @@ class ScgiRtorrentClient:
|
||||
|
||||
|
||||
def _scgi_retry_attempts() -> int:
|
||||
# Note: Krotki retry/backoff chroni masowe operacje przed chwilowym Errno 111 przy wysokim loadzie rTorrent.
|
||||
# Note: Short retry/backoff protects bulk operations from temporary Errno 111 during high rTorrent load.
|
||||
try:
|
||||
return max(1, min(10, int(os.environ.get("PYTORRENT_SCGI_RETRIES", "5"))))
|
||||
except Exception:
|
||||
@@ -97,7 +97,7 @@ def _scgi_retry_delay(attempt: int) -> float:
|
||||
|
||||
|
||||
def _is_transient_scgi_error(exc: Exception) -> bool:
|
||||
# Note: Retry obejmuje typowe chwilowe bledy SCGI/socket, ale nie ukrywa bledow merytorycznych XML-RPC.
|
||||
# Note: Retry covers common temporary SCGI/socket errors but does not hide semantic XML-RPC errors.
|
||||
if isinstance(exc, (ConnectionRefusedError, ConnectionResetError, TimeoutError, socket.timeout)):
|
||||
return True
|
||||
err_no = getattr(exc, "errno", None)
|
||||
@@ -115,7 +115,7 @@ _UNSUPPORTED_EXEC_METHODS: set[str] = set()
|
||||
_EXEC_TARGET_STYLE: dict[str, int] = {}
|
||||
|
||||
def _rt_execute_preview(method_name: str, call_args: tuple) -> str:
|
||||
# Note: Skrocony opis RPC usuwa dlugie skrypty z komunikatu bledu, ale zostawia metode i pierwsze argumenty do diagnostyki.
|
||||
# Note: The compact RPC summary removes long scripts from error messages while keeping the method and first arguments for diagnostics.
|
||||
preview = ", ".join(repr(x) for x in call_args[:3])
|
||||
if len(call_args) > 3:
|
||||
preview += ", ..."
|
||||
@@ -123,7 +123,7 @@ def _rt_execute_preview(method_name: str, call_args: tuple) -> str:
|
||||
|
||||
|
||||
def _rt_execute_target_variants(method: str, args: tuple) -> list[tuple]:
|
||||
# Note: rTorrent XML-RPC w zaleznosci od wersji wymaga pustego targetu albo go odrzuca; zapamietujemy dzialajacy wariant per metoda.
|
||||
# Note: Depending on version, rTorrent XML-RPC either requires or rejects an empty target; cache the working variant per method.
|
||||
variants = [("", *args), args]
|
||||
preferred = _EXEC_TARGET_STYLE.get(method)
|
||||
if preferred is not None and 0 <= preferred < len(variants):
|
||||
@@ -137,7 +137,7 @@ def _is_rt_method_missing(exc: Exception) -> bool:
|
||||
|
||||
|
||||
def _rt_execute_methods(method: str) -> list[str]:
|
||||
# Note: execute2.* jest probowane dopiero gdy podstawowe execute.* nie istnieje, zeby nie generowac falszywych bledow retry.
|
||||
# Note: execute2.* is tried only when the base execute.* method does not exist to avoid false retry errors.
|
||||
methods = [method]
|
||||
if method.startswith("execute."):
|
||||
fallback = method.replace("execute.", "execute2.", 1)
|
||||
@@ -239,7 +239,7 @@ def _run_remote_move(c: ScgiRtorrentClient, src: str, dst: str, poll_interval: f
|
||||
try:
|
||||
output = str(_rt_execute(c, "execute.capture", "sh", "-c", poll_script, "pytorrent-move-poll", status_path) or "").strip()
|
||||
except Exception as exc:
|
||||
# Note: Podczas masowego move rTorrent potrafi chwilowo nie utworzyc pipe dla execute.capture; polling czeka i probuje dalej.
|
||||
# Note: During bulk moves, rTorrent may briefly not create the execute.capture pipe; polling waits and retries.
|
||||
if _is_rt_timeout_error(exc) or _is_transient_scgi_error(exc):
|
||||
continue
|
||||
raise
|
||||
@@ -289,7 +289,7 @@ def _safe_rm_rf_path(path: str) -> str:
|
||||
|
||||
|
||||
def _run_remote_rm(c: ScgiRtorrentClient, path: str, poll_interval: float = 2.0) -> None:
|
||||
# Note: rm -rf dziala w tle po stronie rTorrent, wiec dlugie kasowanie nie trzyma jednego polaczenia SCGI.
|
||||
# Note: rm -rf runs in the background on the rTorrent side, so long deletes do not hold a single SCGI connection.
|
||||
token = uuid.uuid4().hex
|
||||
status_path = f"/tmp/pytorrent-rm-{token}.status"
|
||||
script = (
|
||||
@@ -310,7 +310,7 @@ def _run_remote_rm(c: ScgiRtorrentClient, path: str, poll_interval: float = 2.0)
|
||||
try:
|
||||
output = str(_rt_execute(c, "execute.capture", "sh", "-c", poll_script, "pytorrent-rm-poll", status_path) or "").strip()
|
||||
except Exception as exc:
|
||||
# Note: Remove uzywa tego samego bezpiecznego pollingu co move, wiec chwilowy brak pipe nie wywala calej kolejki.
|
||||
# Note: Remove uses the same safe polling as move, so a temporary missing pipe does not fail the whole queue.
|
||||
if _is_rt_timeout_error(exc) or _is_transient_scgi_error(exc):
|
||||
continue
|
||||
raise
|
||||
@@ -393,6 +393,21 @@ def _row_progress_complete(row: dict) -> bool:
|
||||
return bool(row.get("complete")) or (size > 0 and completed >= size) or float(row.get("progress") or 0) >= 100.0
|
||||
|
||||
|
||||
def _remove_post_check_label_if_finished(c: ScgiRtorrentClient, row: dict) -> bool:
|
||||
labels = _label_names(str(row.get("label") or ""))
|
||||
if POST_CHECK_DOWNLOAD_LABEL not in labels:
|
||||
return False
|
||||
status = str(row.get("status") or "").lower()
|
||||
if not (_row_progress_complete(row) or status == "seeding"):
|
||||
return False
|
||||
labels = [label for label in labels if label != POST_CHECK_DOWNLOAD_LABEL]
|
||||
value = _label_value(labels)
|
||||
# Note: Clean the temporary label after reaching 100% or entering seeding, even when the state no longer comes directly from recheck.
|
||||
c.call("d.custom1.set", str(row.get("hash") or ""), value)
|
||||
row["label"] = value
|
||||
return True
|
||||
|
||||
|
||||
def apply_post_check_policy(profile: dict, rows: list[dict], previous_rows: dict[str, dict] | None = None) -> list[dict]:
|
||||
"""Start complete torrents after check; pause and label incomplete ones."""
|
||||
previous_rows = previous_rows or {}
|
||||
@@ -401,6 +416,11 @@ def apply_post_check_policy(profile: dict, rows: list[dict], previous_rows: dict
|
||||
for row in rows:
|
||||
h = str(row.get("hash") or "")
|
||||
prev = previous_rows.get(h) or {}
|
||||
try:
|
||||
if h and _remove_post_check_label_if_finished(c, row):
|
||||
changes.append({"hash": h, "action": "remove_post_check_label", "complete": True})
|
||||
except Exception as exc:
|
||||
changes.append({"hash": h, "action": "remove_post_check_label_failed", "error": str(exc)})
|
||||
was_checking = str(prev.get("status") or "") == "Checking" or int(prev.get("hashing") or 0) > 0
|
||||
is_checking = str(row.get("status") or "") == "Checking" or int(row.get("hashing") or 0) > 0
|
||||
if not h or not was_checking or is_checking:
|
||||
@@ -408,7 +428,7 @@ def apply_post_check_policy(profile: dict, rows: list[dict], previous_rows: dict
|
||||
complete = _row_progress_complete(row)
|
||||
try:
|
||||
if complete:
|
||||
# Note: Po zakonczonym checku pelny torrent jest automatycznie startowany, zeby od razu seedowal.
|
||||
# Note: After a completed check, a complete torrent is started automatically so it can seed immediately.
|
||||
c.call("d.start", h)
|
||||
labels = [label for label in _label_names(str(row.get("label") or "")) if label != POST_CHECK_DOWNLOAD_LABEL]
|
||||
if _label_value(labels) != str(row.get("label") or ""):
|
||||
@@ -417,7 +437,7 @@ def apply_post_check_policy(profile: dict, rows: list[dict], previous_rows: dict
|
||||
row.update({"state": 1, "active": 1, "paused": False, "status": "Seeding"})
|
||||
changes.append({"hash": h, "action": "start", "complete": True})
|
||||
else:
|
||||
# Note: Niepelny torrent po checku trafia do pauzy i dostaje etykiete informujaca, ze wymaga dalszego pobierania.
|
||||
# Note: After check, an incomplete torrent is paused and labeled to show that it needs more downloading.
|
||||
c.call("d.start", h)
|
||||
c.call("d.pause", h)
|
||||
labels = _label_names(str(row.get("label") or ""))
|
||||
@@ -1186,7 +1206,7 @@ def _download_runtime_state(c: ScgiRtorrentClient, h: str) -> dict:
|
||||
state = _int_rpc(c, 'd.state', h)
|
||||
active = _int_rpc(c, 'd.is_active', h)
|
||||
opened = _int_rpc(c, 'd.is_open', h)
|
||||
# Note: W rTorrent pauza nie zmienia d.state. Paused to state=1, open=1, active=0.
|
||||
# Note: In rTorrent, pause does not change d.state. Paused means state=1, open=1, active=0.
|
||||
return {
|
||||
'state': state,
|
||||
'open': opened,
|
||||
@@ -1205,7 +1225,7 @@ def pause_hash(c: ScgiRtorrentClient, torrent_hash: str) -> dict:
|
||||
before = _download_runtime_state(c, h)
|
||||
result = {'hash': h, 'before': before, 'commands': []}
|
||||
try:
|
||||
# Note: Smart Queue zatrzymuje slot przez d.pause, nie przez d.stop, żeby późniejsze d.resume działało jak w ruTorrent.
|
||||
# Note: Smart Queue frees a slot with d.pause, not d.stop, so later d.resume behaves like ruTorrent.
|
||||
c.call('d.pause', h)
|
||||
result['commands'].append('d.pause')
|
||||
result['after'] = _download_runtime_state(c, h)
|
||||
@@ -1229,8 +1249,8 @@ def resume_paused_hash(c: ScgiRtorrentClient, torrent_hash: str) -> dict:
|
||||
result.update({'ok': True, 'skipped': 'already_active', 'after': before})
|
||||
return result
|
||||
try:
|
||||
# Note: ruTorrent dla od-pauzowania wysyła odpowiednik unpause/d.resume. Nie dokładamy d.start/d.open,
|
||||
# bo to są komendy dla stanu Stopped/Open, a nie dla czystego Paused.
|
||||
# Note: ruTorrent unpauses with the equivalent of d.resume. Do not add d.start/d.open,
|
||||
# because those commands belong to Stopped/Open state, not a clean Paused state.
|
||||
c.call('d.resume', h)
|
||||
result['commands'].append('d.resume')
|
||||
result['after'] = _download_runtime_state(c, h)
|
||||
@@ -1253,13 +1273,13 @@ def start_or_resume_hash(c: ScgiRtorrentClient, torrent_hash: str) -> dict:
|
||||
return result
|
||||
|
||||
if before.get('paused') or (before.get('state') and not before.get('active')):
|
||||
# Note: Paused w rTorrent wznawiamy tylko przez d.resume; d.start jest tu celowo pomijane.
|
||||
# Note: Paused rTorrent items are resumed only with d.resume; d.start is intentionally skipped here.
|
||||
resumed = resume_paused_hash(c, h)
|
||||
resumed['mode'] = 'resume_paused'
|
||||
return resumed
|
||||
|
||||
try:
|
||||
# Note: d.start zostaje wyłącznie dla Stopped/closed, czyli dla stanu innego niż pause->resume.
|
||||
# Note: d.start remains only for Stopped/closed items, not for the pause-to-resume path.
|
||||
c.call('d.open', h)
|
||||
result['commands'].append('d.open')
|
||||
except Exception as exc:
|
||||
@@ -1352,15 +1372,15 @@ def action(profile: dict, torrent_hashes: list[str], name: str, payload: dict |
|
||||
results.append(item)
|
||||
return {"ok": True, "count": len(torrent_hashes), "move_data": move_data, "results": results}
|
||||
if name == "pause":
|
||||
# Note: Pauza aplikacji jest teraz czystym d.pause, żeby późniejszy resume działał bez stop/start.
|
||||
# Note: The app pause action is now a pure d.pause so later resume works without stop/start.
|
||||
results = [pause_hash(c, h) for h in torrent_hashes]
|
||||
return {"ok": True, "count": len(torrent_hashes), "remove_data": False, "results": results}
|
||||
if name in {"resume", "unpause"}:
|
||||
# Note: Resume/Unpause używa wyłącznie d.resume dla stanu Paused.
|
||||
# Note: Resume/Unpause uses only d.resume for Paused state.
|
||||
results = [resume_paused_hash(c, h) for h in torrent_hashes]
|
||||
return {"ok": True, "count": len(torrent_hashes), "remove_data": False, "results": results}
|
||||
if name == "start":
|
||||
# Note: Start rozdziela Stopped od Paused; paused idzie przez d.resume, stopped przez d.start.
|
||||
# Note: Start separates Stopped from Paused; paused items go through d.resume, stopped items through d.start.
|
||||
results = [start_or_resume_hash(c, h) for h in torrent_hashes]
|
||||
return {"ok": True, "count": len(torrent_hashes), "remove_data": False, "results": results}
|
||||
|
||||
|
||||
@@ -53,7 +53,7 @@ def save_settings(profile_id: int, data: dict[str, Any], user_id: int | None = N
|
||||
'stalled_seconds': max(30, int(data.get('stalled_seconds') or current.get('stalled_seconds') or 300)),
|
||||
'min_speed_bytes': max(0, int(data.get('min_speed_bytes') or current.get('min_speed_bytes') or 0)),
|
||||
'min_seeds': max(0, int(data.get('min_seeds') or current.get('min_seeds') or 0)),
|
||||
# Note: Switch chroni całkiem zatrzymane torrenty przed automatycznym startem; domyślnie Smart Queue zarządza tylko paused.
|
||||
# Note: This switch protects fully stopped torrents from automatic starts; by default Smart Queue manages only paused items.
|
||||
'manage_stopped': 1 if data.get('manage_stopped', current.get('manage_stopped')) else 0,
|
||||
}
|
||||
now = utcnow()
|
||||
@@ -169,14 +169,14 @@ def _restore_auto_label(client: Any, profile_id: int, torrent_hash: str, current
|
||||
if live_label != SMART_QUEUE_LABEL:
|
||||
return False
|
||||
try:
|
||||
# Note: Czyści label Smart Queue także wtedy, gdy torrent został oznaczony wcześniej, ale nie ma już wpisu z poprzednim labelem.
|
||||
# Note: Clear the Smart Queue label even when the torrent was marked earlier but no previous-label entry remains.
|
||||
client.call('d.custom1.set', torrent_hash, '')
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
previous = row.get('previous_label') or ''
|
||||
try:
|
||||
# Note: Przy wznowieniu Smart Queue oddaje poprzedni label tylko wtedy, gdy nadal widzi swój label techniczny.
|
||||
# Note: On resume, Smart Queue restores the previous label only while it still sees its own technical label.
|
||||
if live_label == SMART_QUEUE_LABEL or current_label is None:
|
||||
client.call('d.custom1.set', torrent_hash, previous)
|
||||
conn.execute('DELETE FROM smart_queue_auto_labels WHERE profile_id=? AND torrent_hash=?', (profile_id, torrent_hash))
|
||||
@@ -202,15 +202,15 @@ def _call_rtorrent_setter(client: Any, method: str, value: int) -> bool:
|
||||
def _ensure_rtorrent_download_cap(client: Any, max_active: int) -> dict[str, Any]:
|
||||
"""Raise rTorrent download caps that can silently limit Smart Queue to one item."""
|
||||
result: dict[str, Any] = {'checked': False, 'updated': False, 'items': []}
|
||||
# Note: rTorrent może mieć osobny limit globalny i per-throttle. Gdy div=1,
|
||||
# startowanie kończy się praktycznie jednym aktywnym torrentem mimo targetu 100.
|
||||
# Note: rTorrent may have separate global and per-throttle limits. When div=1,
|
||||
# starts can effectively stop at one active torrent even when the target is 100.
|
||||
for key in ('throttle.max_downloads.global', 'throttle.max_downloads.div'):
|
||||
item: dict[str, Any] = {'key': key, 'checked': False, 'updated': False}
|
||||
try:
|
||||
current = int(client.call(key) or 0)
|
||||
item.update({'checked': True, 'current': current, 'target': int(max_active)})
|
||||
result['checked'] = True
|
||||
# Note: 0 oznacza unlimited; podnosimy tylko dodatnie limity niższe od targetu.
|
||||
# Note: 0 means unlimited; raise only positive limits lower than the target.
|
||||
if 0 < current < max_active:
|
||||
ok = _call_rtorrent_setter(client, f'{key}.set', int(max_active))
|
||||
item['updated'] = ok
|
||||
@@ -231,9 +231,9 @@ def _start_download(client: Any, torrent: dict[str, Any]) -> dict[str, Any]:
|
||||
if not h:
|
||||
return {'hash': h, 'ok': False, 'error': 'missing hash'}
|
||||
if bool(torrent.get('paused')) or str(torrent.get('status') or '').lower() == 'paused' or int(torrent.get('state') or 0):
|
||||
# Note: Kandydaci Smart Queue po d.pause mają być wznawiani przez d.resume, bez d.start/d.stop.
|
||||
# Note: Smart Queue candidates paused with d.pause must be resumed with d.resume, without d.start/d.stop.
|
||||
return rtorrent.resume_paused_hash(client, h)
|
||||
# Note: Tylko opcjonalne manage_stopped korzysta ze ścieżki start dla całkowicie zatrzymanych torrentów.
|
||||
# Note: Only optional manage_stopped uses the start path for fully stopped torrents.
|
||||
return rtorrent.start_or_resume_hash(client, h)
|
||||
|
||||
|
||||
@@ -277,8 +277,8 @@ def _read_live_start_state(client: Any, torrent_hash: str) -> dict[str, Any]:
|
||||
result[key] = int(value or 0) if key in {'state', 'active', 'open', 'priority'} else str(value or '')
|
||||
except Exception as exc:
|
||||
result[f'{key}_error'] = str(exc)
|
||||
# Note: Nie uznajemy d.is_open ani state=1 za wznowienie; Paused też potrafi mieć te wartości.
|
||||
# Smart Queue zalicza start dopiero po d.is_active=1, czyli po realnym zdjęciu pauzy.
|
||||
# Note: Do not treat d.is_open or state=1 as resumed; Paused can also have those values.
|
||||
# Smart Queue counts a start only after d.is_active=1, meaning the pause was actually removed.
|
||||
result['started'] = bool(int(result.get('active') or 0))
|
||||
return result
|
||||
|
||||
@@ -308,11 +308,11 @@ def _is_smart_queue_hold(torrent: dict[str, Any] | None, manage_stopped: bool =
|
||||
return False
|
||||
if str(torrent.get('label') or '') == SMART_QUEUE_LABEL:
|
||||
return True
|
||||
# Note: Paused w rTorrent zwykle ma state=1 i active=0, więc nie wolno wymagać state=0.
|
||||
# Dzięki temu Smart Queue widzi pauzowane torrenty jako oczekujące i może później dobić target kolejki.
|
||||
# Note: Paused in rTorrent usually has state=1 and active=0, so state=0 must not be required.
|
||||
# This lets Smart Queue treat paused torrents as pending and fill the queue target later.
|
||||
if bool(torrent.get('paused')):
|
||||
return True
|
||||
# Note: Całkiem zatrzymane pozycje są zarządzane tylko po włączeniu opcji Use stopped torrents.
|
||||
# Note: Fully stopped items are managed only when Use stopped torrents is enabled.
|
||||
if not manage_stopped:
|
||||
return False
|
||||
return not int(torrent.get('state') or 0)
|
||||
@@ -322,7 +322,7 @@ def _clear_untracked_smart_queue_label(client: Any, torrent_hash: str, current_l
|
||||
if current_label != SMART_QUEUE_LABEL:
|
||||
return False
|
||||
try:
|
||||
# Note: Czyści osierocony label Smart Queue, gdy brak wpisu z poprzednim labelem w bazie.
|
||||
# Note: Clear an orphaned Smart Queue label when no previous-label entry exists in the database.
|
||||
client.call('d.custom1.set', torrent_hash, '')
|
||||
return True
|
||||
except Exception:
|
||||
@@ -359,8 +359,8 @@ def _cleanup_auto_labels(client: Any, profile_id: int, torrents: list[dict[str,
|
||||
|
||||
def _is_running_download_slot(t: dict[str, Any]) -> bool:
|
||||
"""Return True for incomplete torrents that already occupy a Smart Queue slot."""
|
||||
# Note: Limit Smart Queue oznacza docelową liczbę realnie aktywnych slotów.
|
||||
# Paused potrafi mieć state=1/open=1, dlatego slot liczymy dopiero po d.is_active=1.
|
||||
# Note: The Smart Queue limit means the target number of actually active slots.
|
||||
# Paused can have state=1/open=1, so a slot is counted only after d.is_active=1.
|
||||
if int(t.get('complete') or 0):
|
||||
return False
|
||||
if str(t.get('label') or '') == SMART_QUEUE_LABEL:
|
||||
@@ -377,10 +377,10 @@ def _is_waiting_download_candidate(t: dict[str, Any], manage_stopped: bool) -> b
|
||||
return False
|
||||
if str(t.get('label') or '') == SMART_QUEUE_LABEL:
|
||||
return True
|
||||
# Note: Paused jest podstawowym źródłem dobijania kolejki, niezależnie od opcji manage_stopped.
|
||||
# Note: Paused items are the primary source for filling the queue, regardless of manage_stopped.
|
||||
if bool(t.get('paused')) or str(t.get('status') or '').lower() == 'paused':
|
||||
return True
|
||||
# Note: Stopped dokładamy tylko wtedy, gdy użytkownik zaznaczył Use stopped torrents.
|
||||
# Note: Stopped items are added only when the user enabled Use stopped torrents.
|
||||
return bool(manage_stopped) and not int(t.get('state') or 0)
|
||||
|
||||
|
||||
@@ -394,7 +394,7 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
|
||||
if not force and not int(settings.get('enabled') or 0):
|
||||
restored: list[str] = []
|
||||
try:
|
||||
# Note: Przy wyłączonym Smart Queue sprzątamy wyłącznie techniczne labele, bez startowania lub pauzowania torrentów.
|
||||
# Note: When Smart Queue is disabled, only technical labels are cleaned up, without starting or pausing torrents.
|
||||
torrents = rtorrent.list_torrents(profile)
|
||||
restored = _cleanup_auto_labels(rtorrent.client_for(profile), profile_id, torrents, set(), bool(settings.get('manage_stopped')))
|
||||
except Exception:
|
||||
@@ -408,15 +408,15 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
|
||||
def is_managed_hold(t: dict[str, Any]) -> bool:
|
||||
return str(t.get('label') or '') == SMART_QUEUE_LABEL
|
||||
|
||||
# Note: Slot Smart Queue liczymy po d.is_active, bo Paused może mieć state=1/open=1 i nie może zajmować miejsca w limicie.
|
||||
# Note: Count Smart Queue slots by d.is_active because Paused can have state=1/open=1 and must not occupy the limit.
|
||||
downloading = [
|
||||
t for t in torrents
|
||||
if _is_running_download_slot(t)
|
||||
and not is_managed_hold(t)
|
||||
and t.get('hash') not in excluded
|
||||
]
|
||||
# Note: Kandydaci obejmują także zwykłe Paused bez labela. Inaczej kolejka widzi tylko 1-2 sztuki
|
||||
# i nie potrafi dobić do zadanego targetu 100.
|
||||
# Note: Candidates also include regular Paused items without a label. Otherwise the queue sees only one or two items
|
||||
# and cannot fill the configured target of 100.
|
||||
stopped = [
|
||||
t for t in torrents
|
||||
if t.get('hash') not in excluded
|
||||
@@ -472,8 +472,8 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
|
||||
to_pause: list[dict[str, Any]] = pause_rank[:max(0, len(downloading) - max_active)]
|
||||
pause_hashes = {str(t.get('hash') or '') for t in to_pause}
|
||||
|
||||
# Note: Rotacja stalled działa tylko przy pełnej kolejce. Gdy brakuje slotów, Smart Queue ma
|
||||
# najpierw dobrać brakujące pozycje, a nie pauzować już istniejące lub błędnie uznane za stalled.
|
||||
# Note: Stalled rotation runs only when the queue is full. When slots are missing, Smart Queue should
|
||||
# first add missing items instead of pausing existing or incorrectly detected stalled items.
|
||||
if candidates and len(downloading) >= max_active:
|
||||
replaceable_stalled = [t for t in stalled if str(t.get('hash') or '') not in pause_hashes]
|
||||
for t in replaceable_stalled[:max(0, len(candidates) - len(to_pause))]:
|
||||
@@ -483,7 +483,7 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
|
||||
active_after_pause = max(0, len(downloading) - len(to_pause))
|
||||
available_slots = max(0, max_active - active_after_pause)
|
||||
to_resume = candidates[:available_slots]
|
||||
# Note: Pozycje poza bieżącą pulą startu zostają jawnie oznaczone jako oczekujące Smart Queue.
|
||||
# Note: Items outside the current start batch are explicitly marked as pending Smart Queue items.
|
||||
to_label_waiting = candidates[available_slots:]
|
||||
|
||||
c = rtorrent.client_for(profile)
|
||||
@@ -517,8 +517,8 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
|
||||
except Exception:
|
||||
label_failed.append(h)
|
||||
|
||||
# Note: Startujemy całą pulę kandydatów w jednej rundzie. Label zdejmujemy po zaakceptowanym RPC,
|
||||
# bo rTorrent może trzymać część pozycji w swojej kolejce z active=0 mimo poprawnego d.start/d.resume.
|
||||
# Note: Start the whole candidate batch in one round. Remove the label after an accepted RPC,
|
||||
# because rTorrent may keep some items in its own queue with active=0 despite a valid d.start/d.resume.
|
||||
for t in to_resume:
|
||||
h = str(t.get('hash') or '')
|
||||
if not h:
|
||||
@@ -533,7 +533,7 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
|
||||
active_verified, start_no_effect = _verify_started_downloads(c, resume_requested)
|
||||
for h in active_verified:
|
||||
_restore_auto_label(c, profile_id, h, None)
|
||||
# Note: Historia pokazuje tylko torrenty faktycznie zdjęte z pauzy, a nie samą liczbę wysłanych komend.
|
||||
# Note: History shows only torrents actually unpaused, not just the number of sent commands.
|
||||
resumed = list(active_verified)
|
||||
keep_labels = (
|
||||
set(paused)
|
||||
|
||||
@@ -171,7 +171,7 @@ def maybe_refresh(profile: dict | None, force: bool = False) -> dict[str, Any] |
|
||||
return cached
|
||||
|
||||
|
||||
def queue_refresh(socketio, profile: dict | None, force: bool = False, emit_update: bool = True) -> dict[str, Any] | None:
|
||||
def queue_refresh(socketio, profile: dict | None, force: bool = False, emit_update: bool = True, room: str | None = None) -> dict[str, Any] | None:
|
||||
"""Schedule heavier statistics refresh outside the main WebSocket/system poller."""
|
||||
if not profile:
|
||||
return None
|
||||
@@ -195,10 +195,12 @@ def queue_refresh(socketio, profile: dict | None, force: bool = False, emit_upda
|
||||
# Note: This can query file metadata per torrent, so it never runs inside the fast CPU/RAM/disk poller.
|
||||
stats = get(profile_snapshot, force=True)
|
||||
if emit_update and stats:
|
||||
socketio.emit("torrent_stats_update", {"profile_id": profile_id, "stats": stats})
|
||||
payload = {"profile_id": profile_id, "stats": stats}
|
||||
socketio.emit("torrent_stats_update", payload, to=room) if room else socketio.emit("torrent_stats_update", payload)
|
||||
except Exception as exc:
|
||||
if emit_update:
|
||||
socketio.emit("torrent_stats_update", {"profile_id": profile_id, "ok": False, "error": str(exc)})
|
||||
payload = {"profile_id": profile_id, "ok": False, "error": str(exc)}
|
||||
socketio.emit("torrent_stats_update", payload, to=room) if room else socketio.emit("torrent_stats_update", payload)
|
||||
finally:
|
||||
with _BACKGROUND_LOCK:
|
||||
_BACKGROUND_PROFILE_IDS.discard(profile_id)
|
||||
|
||||
@@ -2,12 +2,31 @@ from __future__ import annotations
|
||||
|
||||
import threading
|
||||
import psutil
|
||||
from flask_socketio import emit
|
||||
from flask_socketio import emit, join_room, leave_room, disconnect
|
||||
from ..config import POLL_INTERVAL
|
||||
from .preferences import active_profile, get_profile
|
||||
from .torrent_cache import torrent_cache
|
||||
from .torrent_summary import cached_summary
|
||||
from . import rtorrent, smart_queue, traffic_history, automation_rules, torrent_stats
|
||||
from . import rtorrent, smart_queue, traffic_history, automation_rules, torrent_stats, auth
|
||||
|
||||
|
||||
def _profile_room(profile_id: int) -> str:
|
||||
return f"profile:{int(profile_id)}"
|
||||
|
||||
|
||||
def _poller_profiles() -> list[dict]:
|
||||
# Note: Background polling has no browser session, so auth-enabled mode refreshes all profiles and emits only to per-profile rooms.
|
||||
if not auth.enabled():
|
||||
profile = active_profile()
|
||||
return [profile] if profile else []
|
||||
from ..db import connect
|
||||
with connect() as conn:
|
||||
return conn.execute("SELECT * FROM rtorrent_profiles ORDER BY id").fetchall()
|
||||
|
||||
|
||||
def _emit_profile(socketio, event: str, payload: dict, profile_id: int) -> None:
|
||||
target = _profile_room(profile_id) if auth.enabled() else None
|
||||
socketio.emit(event, payload, to=target) if target else socketio.emit(event, payload)
|
||||
|
||||
_started = False
|
||||
_start_lock = threading.Lock()
|
||||
@@ -18,14 +37,16 @@ def register_socketio_handlers(socketio):
|
||||
def poller():
|
||||
tick = 0
|
||||
while True:
|
||||
profile = active_profile()
|
||||
if profile:
|
||||
for profile in _poller_profiles():
|
||||
if not profile:
|
||||
continue
|
||||
pid = int(profile["id"])
|
||||
diff = torrent_cache.refresh(profile)
|
||||
heartbeat = {"ok": bool(diff.get("ok")), "profile_id": profile["id"], "tick": tick, "error": diff.get("error", "")}
|
||||
heartbeat = {"ok": bool(diff.get("ok")), "profile_id": pid, "tick": tick, "error": diff.get("error", "")}
|
||||
if diff.get("ok") and (diff["added"] or diff["updated"] or diff["removed"]):
|
||||
socketio.emit("torrent_patch", {**diff, "summary": cached_summary(profile["id"], torrent_cache.snapshot(profile["id"]), force=True)})
|
||||
_emit_profile(socketio, "torrent_patch", {**diff, "summary": cached_summary(pid, torrent_cache.snapshot(pid), force=True)}, pid)
|
||||
elif not diff.get("ok"):
|
||||
socketio.emit("rtorrent_error", diff)
|
||||
_emit_profile(socketio, "rtorrent_error", diff, pid)
|
||||
try:
|
||||
status = rtorrent.system_status(profile)
|
||||
if bool(profile.get("is_remote")):
|
||||
@@ -36,36 +57,36 @@ def register_socketio_handlers(socketio):
|
||||
status["ram"] = psutil.virtual_memory().percent
|
||||
status["usage_source"] = "local"
|
||||
status["usage_available"] = True
|
||||
status["profile_id"] = profile["id"]
|
||||
traffic_history.record(profile["id"], status.get("down_rate", 0), status.get("up_rate", 0), status.get("total_down", 0), status.get("total_up", 0))
|
||||
socketio.emit("system_stats", status)
|
||||
status["profile_id"] = pid
|
||||
traffic_history.record(pid, status.get("down_rate", 0), status.get("up_rate", 0), status.get("total_down", 0), status.get("total_up", 0))
|
||||
_emit_profile(socketio, "system_stats", status, pid)
|
||||
heartbeat["ok"] = True
|
||||
except Exception as exc:
|
||||
heartbeat["ok"] = False
|
||||
heartbeat["error"] = str(exc)
|
||||
socketio.emit("rtorrent_error", {"profile_id": profile["id"], "error": str(exc)})
|
||||
_emit_profile(socketio, "rtorrent_error", {"profile_id": pid, "error": str(exc)}, pid)
|
||||
if tick % max(1, int(15 * 60 / POLL_INTERVAL)) == 0:
|
||||
# Note: Queue heavier torrent statistics outside the fast system_stats poller.
|
||||
torrent_stats.queue_refresh(socketio, profile, force=False)
|
||||
torrent_stats.queue_refresh(socketio, profile, force=False, room=_profile_room(pid) if auth.enabled() else None)
|
||||
if tick % max(1, int(30 / POLL_INTERVAL)) == 0:
|
||||
try:
|
||||
result = smart_queue.check(profile, force=False)
|
||||
if result.get("enabled"):
|
||||
socketio.emit("smart_queue_update", result)
|
||||
_emit_profile(socketio, "smart_queue_update", result, pid)
|
||||
if result.get("paused") or result.get("resumed") or result.get("resume_requested"):
|
||||
# Note: Po zmianach Smart Queue natychmiast odświeżamy cache, żeby lista Downloading nie czekała na następny cykl pollera.
|
||||
# Note: After Smart Queue changes, refresh cache immediately so the Downloading list does not wait for the next poller cycle.
|
||||
queue_diff = torrent_cache.refresh(profile)
|
||||
if queue_diff.get("ok"):
|
||||
socketio.emit("torrent_patch", {**queue_diff, "summary": cached_summary(profile["id"], torrent_cache.snapshot(profile["id"]), force=True)})
|
||||
_emit_profile(socketio, "torrent_patch", {**queue_diff, "summary": cached_summary(pid, torrent_cache.snapshot(pid), force=True)}, pid)
|
||||
except Exception as exc:
|
||||
socketio.emit("smart_queue_update", {"ok": False, "error": str(exc)})
|
||||
_emit_profile(socketio, "smart_queue_update", {"ok": False, "error": str(exc)}, pid)
|
||||
try:
|
||||
auto_result = automation_rules.check(profile, force=False)
|
||||
if auto_result.get("applied"):
|
||||
socketio.emit("automation_update", auto_result)
|
||||
_emit_profile(socketio, "automation_update", auto_result, pid)
|
||||
except Exception as exc:
|
||||
socketio.emit("automation_update", {"ok": False, "error": str(exc)})
|
||||
socketio.emit("heartbeat", heartbeat)
|
||||
_emit_profile(socketio, "automation_update", {"ok": False, "error": str(exc)}, pid)
|
||||
_emit_profile(socketio, "heartbeat", heartbeat, pid)
|
||||
tick += 1
|
||||
socketio.sleep(POLL_INTERVAL)
|
||||
|
||||
@@ -73,7 +94,7 @@ def register_socketio_handlers(socketio):
|
||||
global _started
|
||||
with _start_lock:
|
||||
if not _started:
|
||||
# Note: Poller startuje przy starcie aplikacji, więc Smart Queue i automatyzacje działają bez otwartego UI.
|
||||
# Note: The poller starts with the app, so Smart Queue and automations work without an open UI.
|
||||
socketio.start_background_task(poller)
|
||||
_started = True
|
||||
|
||||
@@ -82,10 +103,16 @@ def register_socketio_handlers(socketio):
|
||||
@socketio.on("connect")
|
||||
def handle_connect():
|
||||
ensure_poller_started()
|
||||
if auth.enabled() and not auth.current_user_id():
|
||||
# Note: Socket.IO uses the same session auth as REST API; unauthenticated clients are disconnected.
|
||||
disconnect()
|
||||
return False
|
||||
profile = active_profile()
|
||||
if profile:
|
||||
join_room(_profile_room(profile["id"]))
|
||||
emit("connected", {"ok": True, "profile": profile})
|
||||
if not profile:
|
||||
# Note: Fresh installs have no rTorrent yet; tell the client to show setup instead of waiting for a snapshot.
|
||||
# Note: Fresh installs or users without profile access get setup state, not another user's snapshot.
|
||||
emit("profile_required", {"ok": True, "profiles": []})
|
||||
return
|
||||
rows = torrent_cache.snapshot(profile["id"])
|
||||
@@ -93,6 +120,12 @@ def register_socketio_handlers(socketio):
|
||||
|
||||
@socketio.on("select_profile")
|
||||
def handle_select_profile(data):
|
||||
if auth.enabled() and not auth.current_user_id():
|
||||
disconnect()
|
||||
return
|
||||
old_profile = active_profile()
|
||||
if old_profile:
|
||||
leave_room(_profile_room(old_profile["id"]))
|
||||
profile_id = int((data or {}).get("profile_id") or 0)
|
||||
if not profile_id:
|
||||
# Note: Ignore empty profile selections created before the first rTorrent profile exists.
|
||||
@@ -100,8 +133,9 @@ def register_socketio_handlers(socketio):
|
||||
return
|
||||
profile = get_profile(profile_id)
|
||||
if not profile:
|
||||
emit("rtorrent_error", {"error": "Profile does not exist"})
|
||||
emit("rtorrent_error", {"error": "Profile access denied or profile does not exist"})
|
||||
return
|
||||
join_room(_profile_room(profile_id))
|
||||
diff = torrent_cache.refresh(profile)
|
||||
rows = torrent_cache.snapshot(profile_id)
|
||||
emit("torrent_snapshot", {"profile_id": profile_id, "torrents": rows, "summary": cached_summary(profile_id, rows, force=True), "error": diff.get("error", "")})
|
||||
|
||||
@@ -5,7 +5,7 @@ import threading
|
||||
import time
|
||||
import uuid
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from . import rtorrent
|
||||
from . import rtorrent, auth
|
||||
from .preferences import get_profile
|
||||
from ..config import WORKERS
|
||||
from ..db import connect, utcnow, default_user_id
|
||||
@@ -23,7 +23,13 @@ def set_socketio(socketio):
|
||||
|
||||
|
||||
def _emit(name: str, payload: dict):
|
||||
if _socketio:
|
||||
if not _socketio:
|
||||
return
|
||||
profile_id = payload.get("profile_id")
|
||||
if auth.enabled() and profile_id:
|
||||
# Note: Job/socket events are sent only to clients joined to the affected profile room.
|
||||
_socketio.emit(name, payload, to=f"profile:{int(profile_id)}")
|
||||
else:
|
||||
_socketio.emit(name, payload)
|
||||
|
||||
|
||||
@@ -97,7 +103,7 @@ def _set_job(job_id: str, status: str, error: str = "", result: dict | None = No
|
||||
|
||||
|
||||
def enqueue(action_name: str, profile_id: int, payload: dict, user_id: int | None = None, max_attempts: int = 2) -> str:
|
||||
user_id = user_id or default_user_id()
|
||||
user_id = user_id or auth.current_user_id() or default_user_id()
|
||||
job_id = uuid.uuid4().hex
|
||||
now = utcnow()
|
||||
with connect() as conn:
|
||||
@@ -130,7 +136,7 @@ def _run(job_id: str):
|
||||
profile = get_profile(int(job["profile_id"]), int(job["user_id"]))
|
||||
if not profile:
|
||||
_set_job(job_id, "failed", "rTorrent profile does not exist", finished=True)
|
||||
_emit("job_update", {"id": job_id, "status": "failed", "error": "profile not found"})
|
||||
_emit("job_update", {"id": job_id, "profile_id": job.get("profile_id"), "status": "failed", "error": "profile not found"})
|
||||
return
|
||||
profile_id = int(profile["id"])
|
||||
ordered_lock = None
|
||||
@@ -150,26 +156,26 @@ def _run(job_id: str):
|
||||
with connect() as conn:
|
||||
conn.execute("UPDATE jobs SET status='running', attempts=?, started_at=COALESCE(started_at, ?), updated_at=? WHERE id=?", (attempts, utcnow(), utcnow(), job_id))
|
||||
_emit("operation_started", {"job_id": job_id, "action": job["action"], "profile_id": profile["id"], "hashes": payload.get("hashes") or [], "hash_count": len(payload.get("hashes") or []), "bulk": len(payload.get("hashes") or []) > 1})
|
||||
_emit("job_update", {"id": job_id, "status": "running", "attempts": attempts})
|
||||
_emit("job_update", {"id": job_id, "profile_id": profile["id"], "status": "running", "attempts": attempts})
|
||||
result = _execute(profile, job["action"], payload)
|
||||
fresh = _job_row(job_id)
|
||||
# Awaryjne anulowanie: jeżeli użytkownik anuluje zadanie w trakcie pracy, wynik nie nadpisuje statusu cancelled.
|
||||
# Note: Emergency cancel keeps a cancelled job from being overwritten when work finishes later.
|
||||
if fresh and fresh["status"] == "cancelled":
|
||||
return
|
||||
_set_job(job_id, "done", result=result, finished=True)
|
||||
_emit("operation_finished", {"job_id": job_id, "action": job["action"], "profile_id": profile["id"], "hashes": payload.get("hashes") or [], "hash_count": len(payload.get("hashes") or []), "bulk": len(payload.get("hashes") or []) > 1, "result": result})
|
||||
_emit("job_update", {"id": job_id, "status": "done", "result": result})
|
||||
_emit("job_update", {"id": job_id, "profile_id": profile["id"], "status": "done", "result": result})
|
||||
except Exception as exc:
|
||||
fresh = _job_row(job_id) or {}
|
||||
attempts = int(fresh.get("attempts") or 1)
|
||||
max_attempts = int(fresh.get("max_attempts") or 2)
|
||||
# Awaryjne anulowanie: wyjątek z anulowanego zadania nie przywraca go do retry ani failed.
|
||||
# Note: Emergency cancel keeps an exception from a cancelled job from moving it back to retry or failed.
|
||||
if fresh and fresh.get("status") == "cancelled":
|
||||
return
|
||||
status = "pending" if attempts < max_attempts else "failed"
|
||||
_set_job(job_id, status, str(exc), finished=(status == "failed"))
|
||||
_emit("operation_failed", {"job_id": job_id, "action": job.get("action"), "profile_id": job.get("profile_id"), "hashes": payload.get("hashes") or [], "error": str(exc)})
|
||||
_emit("job_update", {"id": job_id, "status": status, "error": str(exc), "attempts": attempts})
|
||||
_emit("job_update", {"id": job_id, "profile_id": job.get("profile_id"), "status": status, "error": str(exc), "attempts": attempts})
|
||||
if status == "pending":
|
||||
_executor.submit(_run, job_id)
|
||||
finally:
|
||||
@@ -225,12 +231,23 @@ def _public_job(row) -> dict:
|
||||
return d
|
||||
|
||||
|
||||
def _job_scope_sql(writable: bool = False) -> tuple[str, tuple]:
|
||||
visible = auth.writable_profile_ids() if writable else auth.visible_profile_ids()
|
||||
if visible is None:
|
||||
return "", ()
|
||||
if not visible:
|
||||
return " WHERE 1=0", ()
|
||||
placeholders = ",".join("?" for _ in visible)
|
||||
return f" WHERE profile_id IN ({placeholders})", tuple(visible)
|
||||
|
||||
|
||||
def list_jobs(limit: int = 200, offset: int = 0):
|
||||
limit = max(1, min(int(limit or 50), 500))
|
||||
offset = max(0, int(offset or 0))
|
||||
where, params = _job_scope_sql()
|
||||
with connect() as conn:
|
||||
rows = conn.execute("SELECT * FROM jobs ORDER BY created_at DESC LIMIT ? OFFSET ?", (limit, offset)).fetchall()
|
||||
total = conn.execute("SELECT COUNT(*) AS n FROM jobs").fetchone()["n"]
|
||||
rows = conn.execute(f"SELECT * FROM jobs{where} ORDER BY created_at DESC LIMIT ? OFFSET ?", (*params, limit, offset)).fetchall()
|
||||
total = conn.execute(f"SELECT COUNT(*) AS n FROM jobs{where}", params).fetchone()["n"]
|
||||
return {"rows": [_public_job(r) for r in rows], "total": total, "limit": limit, "offset": offset}
|
||||
|
||||
|
||||
@@ -238,24 +255,30 @@ def cancel_job(job_id: str) -> bool:
|
||||
row = _job_row(job_id)
|
||||
if not row or row["status"] not in {"pending", "running"}:
|
||||
return False
|
||||
# Note: Emergency cancel ma sens tylko dla niedokonczonych zadan; failed/done zostaja tylko do retry albo czyszczenia logow.
|
||||
# Note: Emergency cancel is useful only for unfinished jobs; failed/done entries stay available for retry or log cleanup.
|
||||
_set_job(job_id, "cancelled", finished=True)
|
||||
_emit("job_update", {"id": job_id, "status": "cancelled"})
|
||||
_emit("job_update", {"id": job_id, "profile_id": row.get("profile_id"), "status": "cancelled"})
|
||||
return True
|
||||
|
||||
|
||||
def clear_jobs() -> int:
|
||||
where, params = _job_scope_sql(writable=True)
|
||||
status_clause = "status NOT IN ('pending', 'running')"
|
||||
sql = f"DELETE FROM jobs{where} AND {status_clause}" if where else f"DELETE FROM jobs WHERE {status_clause}"
|
||||
with connect() as conn:
|
||||
cur = conn.execute("DELETE FROM jobs WHERE status NOT IN ('pending', 'running')")
|
||||
cur = conn.execute(sql, params)
|
||||
return int(cur.rowcount or 0)
|
||||
|
||||
|
||||
def emergency_clear_jobs() -> int:
|
||||
# Awaryjne czyszczenie: najpierw zamyka aktywne zadania jako cancelled, potem czyści całą listę job logów.
|
||||
# Note: Emergency cleanup first marks active jobs as cancelled, then clears the whole job log list.
|
||||
now = utcnow()
|
||||
where, params = _job_scope_sql(writable=True)
|
||||
status_clause = "status IN ('pending', 'running')"
|
||||
update_sql = f"UPDATE jobs SET status='cancelled', error='Emergency cancelled by user', finished_at=COALESCE(finished_at, ?), updated_at=?{where} AND {status_clause}" if where else "UPDATE jobs SET status='cancelled', error='Emergency cancelled by user', finished_at=COALESCE(finished_at, ?), updated_at=? WHERE status IN ('pending', 'running')"
|
||||
with connect() as conn:
|
||||
conn.execute("UPDATE jobs SET status='cancelled', error='Emergency cancelled by user', finished_at=COALESCE(finished_at, ?), updated_at=? WHERE status IN ('pending', 'running')", (now, now))
|
||||
cur = conn.execute("DELETE FROM jobs")
|
||||
conn.execute(update_sql, (now, now, *params) if where else (now, now))
|
||||
cur = conn.execute(f"DELETE FROM jobs{where}", params) if where else conn.execute("DELETE FROM jobs")
|
||||
deleted = int(cur.rowcount or 0)
|
||||
_emit("job_update", {"status": "cleared", "emergency": True})
|
||||
return deleted
|
||||
@@ -267,6 +290,6 @@ def retry_job(job_id: str) -> bool:
|
||||
return False
|
||||
with connect() as conn:
|
||||
conn.execute("UPDATE jobs SET status='pending', error='', finished_at=NULL, updated_at=? WHERE id=?", (utcnow(), job_id))
|
||||
_emit("job_update", {"id": job_id, "status": "pending"})
|
||||
_emit("job_update", {"id": job_id, "profile_id": row.get("profile_id"), "status": "pending"})
|
||||
_executor.submit(_run, job_id)
|
||||
return True
|
||||
|
||||
Reference in New Issue
Block a user