add auth support
This commit is contained in:
@@ -13,9 +13,10 @@ import socket
|
||||
import json
|
||||
import psutil
|
||||
import xml.etree.ElementTree as ET
|
||||
from flask import Blueprint, jsonify, request
|
||||
from flask import Blueprint, jsonify, request, abort
|
||||
from ..config import DB_PATH, JOBS_RETENTION_DAYS, SMART_QUEUE_HISTORY_RETENTION_DAYS, WORKERS
|
||||
from ..db import default_user_id, connect, utcnow
|
||||
from ..db import connect, utcnow
|
||||
from ..services.auth import current_user_id as default_user_id, current_user, list_users, save_user, delete_user, login_user, logout_user, enabled as auth_enabled, require_profile_write
|
||||
from ..services import preferences, rtorrent, torrent_stats
|
||||
from ..services.torrent_cache import torrent_cache
|
||||
from ..services.torrent_summary import cached_summary
|
||||
@@ -27,6 +28,77 @@ bp = Blueprint("api", __name__, url_prefix="/api")
|
||||
MOVE_BULK_MAX_HASHES = 100
|
||||
|
||||
|
||||
@bp.post("/auth/login")
|
||||
def auth_login():
|
||||
# Note: Auth API is hidden when optional authentication is disabled.
|
||||
if not auth_enabled():
|
||||
abort(404)
|
||||
data = request.get_json(silent=True) or {}
|
||||
user = login_user(str(data.get("username") or ""), str(data.get("password") or ""))
|
||||
if not user:
|
||||
return jsonify({"ok": False, "error": "Invalid username or password"}), 401
|
||||
return ok({"user": user, "auth_enabled": auth_enabled()})
|
||||
|
||||
|
||||
@bp.get("/auth/me")
|
||||
def auth_me():
|
||||
if not auth_enabled():
|
||||
abort(404)
|
||||
return ok({"user": current_user(), "auth_enabled": auth_enabled()})
|
||||
|
||||
|
||||
@bp.post("/auth/logout")
|
||||
def auth_logout():
|
||||
if not auth_enabled():
|
||||
abort(404)
|
||||
logout_user()
|
||||
return ok()
|
||||
|
||||
|
||||
@bp.get("/auth/users")
|
||||
def auth_users_list():
|
||||
if not auth_enabled():
|
||||
abort(404)
|
||||
return ok({"users": list_users()})
|
||||
|
||||
|
||||
@bp.post("/auth/users")
|
||||
def auth_users_create():
|
||||
if not auth_enabled():
|
||||
abort(404)
|
||||
try:
|
||||
return ok({"user": save_user(request.get_json(silent=True) or {})})
|
||||
except Exception as exc:
|
||||
return jsonify({"ok": False, "error": str(exc)}), 400
|
||||
|
||||
|
||||
@bp.put("/auth/users/<int:user_id>")
|
||||
def auth_users_update(user_id: int):
|
||||
if not auth_enabled():
|
||||
abort(404)
|
||||
try:
|
||||
return ok({"user": save_user(request.get_json(silent=True) or {}, user_id)})
|
||||
except Exception as exc:
|
||||
return jsonify({"ok": False, "error": str(exc)}), 400
|
||||
|
||||
|
||||
@bp.delete("/auth/users/<int:user_id>")
|
||||
def auth_users_delete(user_id: int):
|
||||
if not auth_enabled():
|
||||
abort(404)
|
||||
try:
|
||||
delete_user(user_id)
|
||||
return ok({"users": list_users()})
|
||||
except Exception as exc:
|
||||
return jsonify({"ok": False, "error": str(exc)}), 400
|
||||
|
||||
|
||||
|
||||
def _job_profile_id(job_id: str) -> int | None:
|
||||
with connect() as conn:
|
||||
row = conn.execute("SELECT profile_id FROM jobs WHERE id=?", (job_id,)).fetchone()
|
||||
return int(row.get("profile_id") or 0) if row else None
|
||||
|
||||
def ok(payload=None):
|
||||
data = {"ok": True}
|
||||
if payload:
|
||||
@@ -312,7 +384,7 @@ def _chunk_hashes(hashes: list[str], size: int = MOVE_BULK_MAX_HASHES) -> list[l
|
||||
|
||||
|
||||
def enqueue_bulk_parts(profile: dict, action_name: str, data: dict) -> list[dict]:
|
||||
# Note: Jedna wspolna funkcja dzieli duze operacje move/remove na male, uporzadkowane party bez ruszania pozostalych akcji.
|
||||
# Note: One shared helper splits large move/remove operations into small ordered parts without changing other actions.
|
||||
base_payload = enrich_bulk_payload(profile, action_name, data)
|
||||
hashes = base_payload.get("hashes") or []
|
||||
chunks = _chunk_hashes(hashes)
|
||||
@@ -342,12 +414,12 @@ def enqueue_bulk_parts(profile: dict, action_name: str, data: dict) -> list[dict
|
||||
|
||||
|
||||
def enqueue_move_bulk_parts(profile: dict, data: dict) -> list[dict]:
|
||||
# Note: Zachowuje stary publiczny helper dla move, ale korzysta z tej samej logiki partycji.
|
||||
# Note: Keep the old public move helper while using the same partitioning logic.
|
||||
return enqueue_bulk_parts(profile, "move", data)
|
||||
|
||||
|
||||
def enqueue_remove_bulk_parts(profile: dict, data: dict) -> list[dict]:
|
||||
# Note: Remove/rm dostaje identyczne dzielenie na party jak move, co zmniejsza load na rTorrent.
|
||||
# Note: Remove/rm uses the same partitioning as move, which lowers rTorrent load.
|
||||
return enqueue_bulk_parts(profile, "remove", data)
|
||||
|
||||
|
||||
@@ -413,6 +485,8 @@ def torrents():
|
||||
@bp.get("/torrent-stats")
|
||||
def torrent_stats_get():
|
||||
profile = preferences.active_profile()
|
||||
if not profile:
|
||||
return ok({"stats": {}, "error": "No profile"})
|
||||
force = str(request.args.get("force") or "").lower() in {"1", "true", "yes"}
|
||||
try:
|
||||
# Note: Heavy file metadata is served from a 15-minute DB cache unless the user explicitly refreshes it.
|
||||
@@ -640,7 +714,7 @@ def jobs_list():
|
||||
@bp.post("/jobs/clear")
|
||||
def jobs_clear():
|
||||
if str(request.args.get("force") or "").lower() in {"1", "true", "yes"}:
|
||||
# Awaryjne czyszczenie: endpoint zachowuje standardowe działanie, a force=1 uruchamia tryb ratunkowy.
|
||||
# Note: Emergency cleanup keeps the endpoint behavior unchanged, while force=1 enables rescue mode.
|
||||
deleted = emergency_clear_jobs()
|
||||
return ok({"deleted": deleted, "emergency": True})
|
||||
deleted = clear_jobs()
|
||||
@@ -685,6 +759,7 @@ def cleanup_all():
|
||||
|
||||
@bp.post("/jobs/<job_id>/cancel")
|
||||
def jobs_cancel(job_id: str):
|
||||
require_profile_write(_job_profile_id(job_id))
|
||||
if not cancel_job(job_id):
|
||||
return jsonify({"ok": False, "error": "Only unfinished jobs can be cancelled"}), 400
|
||||
return ok({"emergency": True})
|
||||
@@ -692,6 +767,7 @@ def jobs_cancel(job_id: str):
|
||||
|
||||
@bp.post("/jobs/<job_id>/retry")
|
||||
def jobs_retry(job_id: str):
|
||||
require_profile_write(_job_profile_id(job_id))
|
||||
if not retry_job(job_id):
|
||||
return jsonify({"ok": False, "error": "Only failed or cancelled jobs can be retried"}), 400
|
||||
return ok()
|
||||
@@ -910,7 +986,7 @@ def smart_queue_check():
|
||||
return ok({'result': {'ok': False, 'error': 'No profile'}})
|
||||
try:
|
||||
result = smart_queue.check(profile, force=True)
|
||||
# Note: Ręczny check zwraca od razu świeży snapshot, żeby UI pokazało realną liczbę Downloading po akcji.
|
||||
# Note: Manual check immediately returns a fresh snapshot so the UI shows the real Downloading count after the action.
|
||||
diff = torrent_cache.refresh(profile)
|
||||
rows = torrent_cache.snapshot(profile['id'])
|
||||
return ok({'result': result, 'torrent_patch': {**diff, 'summary': cached_summary(profile['id'], rows, force=True)}})
|
||||
|
||||
Reference in New Issue
Block a user