diff --git a/pytorrent/routes/api.py b/pytorrent/routes/api.py
index 4782f05..5bc0ee3 100644
--- a/pytorrent/routes/api.py
+++ b/pytorrent/routes/api.py
@@ -19,7 +19,7 @@ from ..db import default_user_id, connect, utcnow
 from ..services import preferences, rtorrent
 from ..services.torrent_cache import torrent_cache
 from ..services.torrent_summary import cached_summary
-from ..services.workers import enqueue, list_jobs, cancel_job, retry_job, clear_jobs
+from ..services.workers import enqueue, list_jobs, cancel_job, retry_job, clear_jobs, emergency_clear_jobs
 from ..services.geoip import lookup_ip
 
 bp = Blueprint("api", __name__, url_prefix="/api")
@@ -566,8 +566,12 @@ def jobs_list():
 
 @bp.post("/jobs/clear")
 def jobs_clear():
+    if str(request.args.get("force") or "").lower() in {"1", "true", "yes"}:
+        # Awaryjne czyszczenie: endpoint zachowuje standardowe działanie, a force=1 uruchamia tryb ratunkowy.
+        deleted = emergency_clear_jobs()
+        return ok({"deleted": deleted, "emergency": True})
     deleted = clear_jobs()
-    return ok({"deleted": deleted})
+    return ok({"deleted": deleted, "emergency": False})
 
 
 @bp.get("/cleanup/summary")
@@ -609,8 +613,8 @@ def cleanup_all():
 @bp.post("/jobs/<job_id>/cancel")
 def jobs_cancel(job_id: str):
     if not cancel_job(job_id):
-        return jsonify({"ok": False, "error": "Only pending or failed jobs can be cancelled"}), 400
-    return ok()
+        return jsonify({"ok": False, "error": "Only unfinished jobs can be cancelled"}), 400
+    return ok({"emergency": True})
 
 
 @bp.post("/jobs/<job_id>/retry")
diff --git a/pytorrent/services/workers.py b/pytorrent/services/workers.py
index 0d9b210..1c9711f 100644
--- a/pytorrent/services/workers.py
+++ b/pytorrent/services/workers.py
@@ -152,6 +152,10 @@ def _run(job_id: str):
         _emit("operation_started", {"job_id": job_id, "action": job["action"], "profile_id": profile["id"], "hashes": payload.get("hashes") or [], "hash_count": len(payload.get("hashes") or []), "bulk": len(payload.get("hashes") or []) > 1})
         _emit("job_update", {"id": job_id, "status": "running", "attempts": attempts})
         result = _execute(profile, job["action"], payload)
+        fresh = _job_row(job_id)
+        # Awaryjne anulowanie: jeżeli użytkownik anuluje zadanie w trakcie pracy, wynik nie nadpisuje statusu cancelled.
+        if fresh and fresh["status"] == "cancelled":
+            return
         _set_job(job_id, "done", result=result, finished=True)
         _emit("operation_finished", {"job_id": job_id, "action": job["action"], "profile_id": profile["id"], "hashes": payload.get("hashes") or [], "hash_count": len(payload.get("hashes") or []), "bulk": len(payload.get("hashes") or []) > 1, "result": result})
         _emit("job_update", {"id": job_id, "status": "done", "result": result})
@@ -159,6 +163,9 @@ def _run(job_id: str):
         fresh = _job_row(job_id) or {}
         attempts = int(fresh.get("attempts") or 1)
         max_attempts = int(fresh.get("max_attempts") or 2)
+        # Awaryjne anulowanie: wyjątek z anulowanego zadania nie przywraca go do retry ani failed.
+        if fresh and fresh.get("status") == "cancelled":
+            return
         status = "pending" if attempts < max_attempts else "failed"
         _set_job(job_id, status, str(exc), finished=(status == "failed"))
         _emit("operation_failed", {"job_id": job_id, "action": job.get("action"), "profile_id": job.get("profile_id"), "hashes": payload.get("hashes") or [], "error": str(exc)})
@@ -226,8 +233,9 @@ def list_jobs(limit: int = 200, offset: int = 0):
 
 def cancel_job(job_id: str) -> bool:
     row = _job_row(job_id)
-    if not row or row["status"] not in {"pending", "failed"}:
+    if not row or row["status"] in {"done", "cancelled"}:
         return False
+    # Awaryjne anulowanie: pending, running i failed można oznaczyć jako cancelled z poziomu użytkownika.
     _set_job(job_id, "cancelled", finished=True)
     _emit("job_update", {"id": job_id, "status": "cancelled"})
     return True
@@ -239,6 +247,17 @@ def clear_jobs() -> int:
         return int(cur.rowcount or 0)
 
 
+def emergency_clear_jobs() -> int:
+    # Awaryjne czyszczenie: najpierw zamyka aktywne zadania jako cancelled, potem czyści całą listę job logów.
+ now = utcnow() + with connect() as conn: + conn.execute("UPDATE jobs SET status='cancelled', error='Emergency cancelled by user', finished_at=COALESCE(finished_at, ?), updated_at=? WHERE status IN ('pending', 'running', 'failed')", (now, now)) + cur = conn.execute("DELETE FROM jobs") + deleted = int(cur.rowcount or 0) + _emit("job_update", {"status": "cleared", "emergency": True}) + return deleted + + def retry_job(job_id: str) -> bool: row = _job_row(job_id) if not row or row["status"] not in {"failed", "cancelled"}: diff --git a/pytorrent/static/app.js b/pytorrent/static/app.js index 2a61305..4fff591 100644 --- a/pytorrent/static/app.js +++ b/pytorrent/static/app.js @@ -311,10 +311,10 @@ $('saveColumnsBtn')?.addEventListener('click',async()=>{ document.querySelectorAll('.column-toggle').forEach(cb=>cb.checked?hiddenColumns.delete(cb.dataset.colKey):hiddenColumns.add(cb.dataset.colKey)); applyColumnVisibility(); scheduleRender(true); await post('/api/preferences',{table_columns_json:JSON.stringify({hidden:[...hiddenColumns]})}).catch(e=>toast(e.message,'danger')); toast('Columns saved','success'); }); $('resetColumnsBtn')?.addEventListener('click',async()=>{ hiddenColumns.clear(); renderColumnManager(); applyColumnVisibility(); scheduleRender(true); await post('/api/preferences',{table_columns_json:JSON.stringify({hidden:[]})}).catch(()=>{}); }); - async function loadJobs(page=jobsPage){ const box=$('jobsTable'); if(!box)return; jobsPage=Math.max(0,page|0); box.innerHTML=' Loading jobs...'; const offset=jobsPage*jobsLimit; const j=await (await fetch(`/api/jobs?limit=${jobsLimit}&offset=${offset}`)).json(); const rows=j.jobs||[]; jobsTotal=Number(j.total||rows.length); const details=r=>{ const count=Number(r.hash_count||0); if(r.is_bulk || count>1) return `bulk
${esc(count)} torrent(s), details hidden`; const bits=[]; if(count) bits.push(`${esc(count)} torrent`); if(r.summary) bits.push(esc(r.summary)); return bits.join('
') || '-'; }; box.innerHTML=table(['Status','Action','Profile','Count','Details','Attempts','Started','Finished','Error','Actions'],rows.map(r=>[`${esc(r.status)}`,esc(r.action),esc(r.profile_id),esc(r.hash_count||0),details(r),esc(r.attempts||0),dateCell(r.started_at||r.created_at),dateCell(r.finished_at||r.updated_at),compactCell(r.error||'',140),` `])); renderJobsPager(); } + async function loadJobs(page=jobsPage){ const box=$('jobsTable'); if(!box)return; jobsPage=Math.max(0,page|0); box.innerHTML=' Loading jobs...'; const offset=jobsPage*jobsLimit; const j=await (await fetch(`/api/jobs?limit=${jobsLimit}&offset=${offset}`)).json(); const rows=j.jobs||[]; jobsTotal=Number(j.total||rows.length); const details=r=>{ const count=Number(r.hash_count||0); if(r.is_bulk || count>1) return `bulk
${esc(count)} torrent(s), details hidden`; const bits=[]; if(count) bits.push(`${esc(count)} torrent`); if(r.summary) bits.push(esc(r.summary)); return bits.join('
') || '-'; }; box.innerHTML=table(['Status','Action','Profile','Count','Details','Attempts','Started','Finished','Error','Actions'],rows.map(r=>[`${esc(r.status)}`,esc(r.action),esc(r.profile_id),esc(r.hash_count||0),details(r),esc(r.attempts||0),dateCell(r.started_at||r.created_at),dateCell(r.finished_at||r.updated_at),compactCell(r.error||'',140),` `])); renderJobsPager(); } function renderJobsPager(){ const p=$('jobsPager'); if(!p)return; const pages=Math.max(1,Math.ceil(jobsTotal/jobsLimit)); p.innerHTML=`
Page ${jobsPage+1} / ${pages} · ${jobsTotal} jobs
`; $('jobsPrev')?.addEventListener('click',()=>loadJobs(jobsPage-1)); $('jobsNext')?.addEventListener('click',()=>loadJobs(jobsPage+1)); } - $('jobsModal')?.addEventListener('show.bs.modal',loadJobs); $('refreshJobsBtn')?.addEventListener('click',loadJobs); $('jobsTable')?.addEventListener('click',async e=>{ const btn=e.target.closest('.job-retry,.job-cancel'); if(!btn)return; const id=btn.dataset.id; if(!id)return; if(btn.classList.contains('job-retry')) await post(`/api/jobs/${id}/retry`,{}).catch(x=>toast(x.message,'danger')); if(btn.classList.contains('job-cancel')) await post(`/api/jobs/${id}/cancel`,{}).catch(x=>toast(x.message,'danger')); loadJobs(); }); - $('clearJobsBtn')?.addEventListener('click',async()=>{ if(!confirm('Clear finished job logs? Pending and running jobs will stay.')) return; try{ const j=await post('/api/jobs/clear',{}); toast(`Cleared ${j.deleted||0} job log(s)`,'success'); jobsPage=0; loadJobs(0); }catch(e){ toast(e.message,'danger'); } }); + $('jobsModal')?.addEventListener('show.bs.modal',loadJobs); $('refreshJobsBtn')?.addEventListener('click',loadJobs); $('jobsTable')?.addEventListener('click',async e=>{ const btn=e.target.closest('.job-retry,.job-cancel'); if(!btn)return; const id=btn.dataset.id; if(!id)return; if(btn.classList.contains('job-retry')) await post(`/api/jobs/${id}/retry`,{}).catch(x=>toast(x.message,'danger')); if(btn.classList.contains('job-cancel')){ const st=btn.dataset.status||''; if((st==='pending'||st==='running') && !confirm('Emergency cancel this unfinished job?')) return; await post(`/api/jobs/${id}/cancel`,{}).catch(x=>toast(x.message,'danger')); } loadJobs(); }); + $('clearJobsBtn')?.addEventListener('click',async()=>{ const emergency=confirm('Emergency clear all job logs, including unfinished jobs? OK = emergency clear, Cancel = clear only finished logs.'); if(!emergency && !confirm('Clear finished job logs? 
Pending and running jobs will stay.')) return; try{ const j=await post(`/api/jobs/clear${emergency?'?force=1':''}`,{}); toast(`${emergency?'Emergency cleared':'Cleared'} ${j.deleted||0} job log(s)`,'success'); jobsPage=0; loadJobs(0); }catch(e){ toast(e.message,'danger'); } }); async function loadLabels(){ const j=await (await fetch('/api/labels')).json(); const labels=j.labels||[]; knownLabels=labels; renderLabelFilters(); renderLabelChooser(); if($('labelsManager')) $('labelsManager').innerHTML=labels.length?labels.map(l=>`
${esc(l.name)}
`).join(''):'No labels.'; } function renderLabelChooser(){ if($('selectedLabelList')) $('selectedLabelList').innerHTML=[...modalLabels].map(l=>``).join('') || 'No labels selected.'; if($('labelList')) $('labelList').innerHTML=knownLabels.map(l=>``).join('') || 'No saved labels.'; } @@ -471,7 +471,7 @@ cleanupCountCard('Smart Queue logs', data.smart_queue_history_total, `retention ${retention.smart_queue_history||'-'} days`), cleanupCountCard('Database size', db.size_h||db.size||'-', db.path||'') ]; - box.innerHTML=`
${cards.join('')}
Job cleanup uses the existing job endpoint logic, so pending and running jobs are preserved.
`; + box.innerHTML=`
${cards.join('')}
Job cleanup preserves pending and running jobs. Use Jobs modal for emergency clear when unfinished jobs must be removed.
`; } async function loadCleanup(){ const box=$('cleanupManager'); if(!box) return;