Add bulk-part jobs and SCGI retries
This commit is contained in:
@@ -311,13 +311,13 @@ def _chunk_hashes(hashes: list[str], size: int = MOVE_BULK_MAX_HASHES) -> list[l
|
||||
return [hashes[index:index + safe_size] for index in range(0, len(hashes), safe_size)]
|
||||
|
||||
|
||||
def enqueue_move_bulk_parts(profile: dict, data: dict) -> list[dict]:
|
||||
# Note: Keeps the existing move action intact for normal batches, while large moves are queued as bulk-1, bulk-2, etc.
|
||||
base_payload = enrich_bulk_payload(profile, "move", data)
|
||||
def enqueue_bulk_parts(profile: dict, action_name: str, data: dict) -> list[dict]:
|
||||
# Note: Jedna wspolna funkcja dzieli duze operacje move/remove na male, uporzadkowane party bez ruszania pozostalych akcji.
|
||||
base_payload = enrich_bulk_payload(profile, action_name, data)
|
||||
hashes = base_payload.get("hashes") or []
|
||||
chunks = _chunk_hashes(hashes)
|
||||
if len(chunks) <= 1:
|
||||
job_id = enqueue("move", profile["id"], base_payload)
|
||||
job_id = enqueue(action_name, profile["id"], base_payload)
|
||||
return [{"job_id": job_id, "label": "bulk-1", "part": 1, "parts": 1, "hashes": hashes, "hash_count": len(hashes)}]
|
||||
|
||||
jobs = []
|
||||
@@ -336,11 +336,21 @@ def enqueue_move_bulk_parts(profile: dict, data: dict) -> list[dict]:
|
||||
"items": [items_by_hash[h] for h in chunk if h in items_by_hash],
|
||||
})
|
||||
payload["job_context"] = context
|
||||
job_id = enqueue("move", profile["id"], payload)
|
||||
job_id = enqueue(action_name, profile["id"], payload)
|
||||
jobs.append({"job_id": job_id, "label": context["bulk_label"], "part": index, "parts": len(chunks), "hashes": chunk, "hash_count": len(chunk)})
|
||||
return jobs
|
||||
|
||||
|
||||
def enqueue_move_bulk_parts(profile: dict, data: dict) -> list[dict]:
    """Split a large "move" request into ordered bulk part jobs.

    Backward-compatible wrapper: keeps the old public helper for move
    while delegating to the shared partitioning logic in
    ``enqueue_bulk_parts``.

    Args:
        profile: Profile dict (must contain the fields ``enqueue_bulk_parts`` reads).
        data: Raw request payload for the move action.

    Returns:
        List of job descriptors, one per enqueued bulk part.
    """
    return enqueue_bulk_parts(profile, "move", data)
|
||||
|
||||
|
||||
def enqueue_remove_bulk_parts(profile: dict, data: dict) -> list[dict]:
    """Split a large "remove" request into ordered bulk part jobs.

    Remove/rm gets the exact same partitioning as move, which reduces
    the load on rTorrent for big batches.

    Args:
        profile: Profile dict (must contain the fields ``enqueue_bulk_parts`` reads).
        data: Raw request payload for the remove action.

    Returns:
        List of job descriptors, one per enqueued bulk part.
    """
    return enqueue_bulk_parts(profile, "remove", data)
|
||||
|
||||
|
||||
@bp.get("/profiles")
def profiles_list():
    """Return all configured profiles plus the currently active one."""
    payload = {
        "profiles": preferences.list_profiles(),
        "active": preferences.active_profile(),
    }
    return ok(payload)
|
||||
@@ -475,9 +485,9 @@ def torrent_action(action_name: str):
|
||||
allowed = {"start", "pause", "stop", "resume", "recheck", "reannounce", "remove", "move", "set_label", "set_ratio_group"}
|
||||
if action_name not in allowed:
|
||||
return jsonify({"ok": False, "error": "Unknown action"}), 400
|
||||
if action_name == "move":
|
||||
# Note: Large move requests are split into ordered bulk parts; smaller requests keep the old single-job response shape.
|
||||
jobs = enqueue_move_bulk_parts(profile, data)
|
||||
if action_name in {"move", "remove"}:
|
||||
# Note: Large move/remove requests are split into ordered bulk parts; smaller requests keep the old single-job response shape.
|
||||
jobs = enqueue_bulk_parts(profile, action_name, data)
|
||||
first_job_id = jobs[0]["job_id"] if jobs else None
|
||||
total_hashes = sum(int(job.get("hash_count") or 0) for job in jobs)
|
||||
return ok({
|
||||
|
||||
Reference in New Issue
Block a user