Compare commits: master...fix_labesl (11 commits)
| SHA1 |
|---|
| 85e1e6adcd |
| 7a4bda98a2 |
| e99d19ece0 |
| f9280daa21 |
| 5d7abe6e59 |
| 440b187c39 |
| 2691442fc1 |
| 98f155b53a |
| 0730e7316c |
| 7c31136535 |
| a72b6eb364 |
```diff
@@ -71,3 +71,4 @@ JOBS_RETENTION_DAYS = _env_int("PYTORRENT_JOBS_RETENTION_DAYS", 30, 1)
 SMART_QUEUE_HISTORY_RETENTION_DAYS = _env_int("PYTORRENT_SMART_QUEUE_HISTORY_RETENTION_DAYS", 30, 1)
 LOG_RETENTION_DAYS = _env_int("PYTORRENT_LOG_RETENTION_DAYS", 30, 1)
 SMART_QUEUE_LABEL = os.getenv("PYTORRENT_SMART_QUEUE_LABEL", "Smart Queue Paused")
+SMART_QUEUE_STALLED_LABEL = os.getenv("PYTORRENT_SMART_QUEUE_STALLED_LABEL", "Stalled")
```
```diff
@@ -139,6 +139,7 @@ CREATE TABLE IF NOT EXISTS smart_queue_settings (
     stalled_seconds INTEGER DEFAULT 300,
     min_speed_bytes INTEGER DEFAULT 1024,
     min_seeds INTEGER DEFAULT 1,
+    min_peers INTEGER DEFAULT 0,
     manage_stopped INTEGER DEFAULT 0,
     updated_at TEXT NOT NULL,
     PRIMARY KEY(user_id, profile_id)
```
```diff
@@ -280,6 +281,7 @@ MIGRATIONS = [
     "ALTER TABLE rtorrent_config_overrides ADD COLUMN baseline_value TEXT",
     "ALTER TABLE torrent_stats_cache ADD COLUMN updated_epoch REAL DEFAULT 0",
     "ALTER TABLE smart_queue_settings ADD COLUMN manage_stopped INTEGER DEFAULT 0",
+    "ALTER TABLE smart_queue_settings ADD COLUMN min_peers INTEGER DEFAULT 0",
 ]
```
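The MIGRATIONS list is plain SQL strings, and the runner that consumes it is outside this diff. A minimal sketch of how such a list is usually applied idempotently; the helper below is an assumption, not this repo's code:

```python
import sqlite3

def apply_migrations(conn: sqlite3.Connection, migrations: list[str]) -> None:
    # Hypothetical runner: each statement is attempted once; "duplicate column"
    # errors are swallowed so re-running the list on an existing DB stays safe.
    for stmt in migrations:
        try:
            conn.execute(stmt)
        except sqlite3.OperationalError as exc:
            if "duplicate column" not in str(exc).lower():
                raise
```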
```diff
@@ -326,9 +326,11 @@ def cleanup_summary() -> dict:
         "jobs_total": _table_count("jobs"),
         "jobs_clearable": _table_count("jobs", "WHERE status NOT IN ('pending', 'running')"),
         "smart_queue_history_total": _table_count("smart_queue_history"),
+        "automation_history_total": _table_count("automation_history"),
         "retention_days": {
             "jobs": JOBS_RETENTION_DAYS,
             "smart_queue_history": SMART_QUEUE_HISTORY_RETENTION_DAYS,
+            "automation_history": SMART_QUEUE_HISTORY_RETENTION_DAYS,
         },
         "database": _db_size(),
     }
```
```diff
@@ -731,6 +733,19 @@ def cleanup_smart_queue():
     return ok({"deleted": deleted, "cleanup": cleanup_summary()})
 
 
+@bp.post("/cleanup/automations")
+def cleanup_automations():
+    with connect() as conn:
+        exists = conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='automation_history'").fetchone()
+        if not exists:
+            deleted = 0
+        else:
+            # Note: Cleanup panel removes only automation logs, not saved automation rules.
+            cur = conn.execute("DELETE FROM automation_history")
+            deleted = int(cur.rowcount or 0)
+    return ok({"deleted": deleted, "cleanup": cleanup_summary()})
+
+
 @bp.post("/cleanup/all")
 def cleanup_all():
     deleted_jobs = clear_jobs()
```
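A quick smoke test for the new endpoint. The `/api` prefix and host/port are assumptions based on the sibling routes in the OpenAPI hunk further down, and `ok()` is assumed to wrap its payload with `"ok": true`, matching the explicit error branches elsewhere in this file:

```python
import requests  # hypothetical smoke test, not part of the diff

# Assumes the app listens on localhost:5000 and the blueprint sits under /api.
resp = requests.post("http://localhost:5000/api/cleanup/automations")
data = resp.json()
# Expected shape per the handler above: {"ok": true, "deleted": N, "cleanup": {...}}
print(data.get("deleted"))
```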
```diff
@@ -741,7 +756,13 @@ def cleanup_all():
         else:
             cur = conn.execute("DELETE FROM smart_queue_history")
             deleted_smart = int(cur.rowcount or 0)
-    return ok({"deleted": {"jobs": deleted_jobs, "smart_queue_history": deleted_smart}, "cleanup": cleanup_summary()})
+        exists_auto = conn.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='automation_history'").fetchone()
+        if not exists_auto:
+            deleted_auto = 0
+        else:
+            cur = conn.execute("DELETE FROM automation_history")
+            deleted_auto = int(cur.rowcount or 0)
+    return ok({"deleted": {"jobs": deleted_jobs, "smart_queue_history": deleted_smart, "automation_history": deleted_auto}, "cleanup": cleanup_summary()})
 
 
 @bp.post("/jobs/<job_id>/cancel")
```
```diff
@@ -1062,3 +1083,17 @@ def automations_check():
         return ok({'result': automation_rules.check(profile, force=True), 'history': automation_rules.list_history(profile['id'])})
     except Exception as exc:
         return jsonify({'ok': False, 'error': str(exc)}), 500
+
+
+@bp.delete('/automations/history')
+def automations_history_clear():
+    from ..services import automation_rules
+    profile = preferences.active_profile()
+    if not profile:
+        return jsonify({'ok': False, 'error': 'No profile'}), 400
+    try:
+        # Note: Clear only automation execution logs; rules and cooldown state stay unchanged.
+        deleted = automation_rules.clear_history(profile['id'])
+        return ok({'deleted': deleted, 'history': automation_rules.list_history(profile['id']), 'cleanup': cleanup_summary()})
+    except Exception as exc:
+        return jsonify({'ok': False, 'error': str(exc)}), 500
```
```diff
@@ -106,7 +106,7 @@ def openapi():
         "/api/rss/feeds": {"post": {"summary": "Add RSS feed", "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "RSS config"}}}},
         "/api/rss/rules": {"post": {"summary": "Add RSS rule", "requestBody": {"content": {"application/json": {"schema": {"type": "object"}}}}, "responses": {"200": {"description": "RSS config"}}}},
         "/api/rss/check": {"post": {"summary": "Manually check RSS feeds", "responses": {"200": {"description": "Queued matches"}}}},
-        "/api/smart-queue": {"get": {"summary": "Get Smart Queue settings, exceptions and history", "parameters": [{"name": "history_limit", "in": "query", "schema": {"type": "integer", "default": 10, "minimum": 1, "maximum": 100}, "description": "Number of Smart Queue history rows to return"}], "responses": {"200": {"description": "Smart Queue config with history and history_total"}}}, "post": {"summary": "Save Smart Queue settings", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"enabled": {"type": "boolean"}, "max_active_downloads": {"type": "integer"}, "stalled_seconds": {"type": "integer"}, "min_speed_bytes": {"type": "integer"}, "min_seeds": {"type": "integer"}}}}}}, "responses": {"200": {"description": "Saved"}}}},
+        "/api/smart-queue": {"get": {"summary": "Get Smart Queue settings, exceptions and history", "parameters": [{"name": "history_limit", "in": "query", "schema": {"type": "integer", "default": 10, "minimum": 1, "maximum": 100}, "description": "Number of Smart Queue history rows to return"}], "responses": {"200": {"description": "Smart Queue config with history and history_total"}}}, "post": {"summary": "Save Smart Queue settings", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"enabled": {"type": "boolean"}, "max_active_downloads": {"type": "integer"}, "stalled_seconds": {"type": "integer"}, "min_speed_bytes": {"type": "integer"}, "min_seeds": {"type": "integer"}, "min_peers": {"type": "integer"}}}}}}, "responses": {"200": {"description": "Saved"}}}},
         "/api/smart-queue/check": {"post": {"summary": "Run Smart Queue immediately", "responses": {"200": {"description": "Smart Queue action result"}}}},
         "/api/smart-queue/exclusion": {"post": {"summary": "Add or remove a torrent Smart Queue exception", "requestBody": {"content": {"application/json": {"schema": {"type": "object", "properties": {"hash": {"type": "string"}, "excluded": {"type": "boolean"}, "reason": {"type": "string"}}}}}}, "responses": {"200": {"description": "Exception list"}}}},
         "/api/traffic/history": {"get": {"summary": "Transfer history for charts", "parameters": [{"name": "range", "in": "query", "schema": {"type": "string", "enum": ["15m", "1h", "3h", "6h", "24h", "7d", "30d", "90d"]}}], "responses": {"200": {"description": "Aggregated traffic history"}}}}
```
```diff
@@ -5,6 +5,10 @@ import json
 from ..db import connect, default_user_id, utcnow
 from . import rtorrent
 from .preferences import active_profile
+from .workers import enqueue
+
+AUTOMATION_JOB_CHUNK_SIZE = 100
+
 
 
 def _loads(value: str | None, default: Any) -> Any:
```
```diff
@@ -94,6 +98,14 @@ def list_history(profile_id: int, user_id: int | None = None, limit: int = 30) -
     return conn.execute('SELECT * FROM automation_history WHERE user_id=? AND profile_id=? ORDER BY created_at DESC LIMIT ?', (user_id, profile_id, max(1, min(int(limit or 30), 100)))).fetchall()
 
 
+def clear_history(profile_id: int, user_id: int | None = None) -> int:
+    user_id = user_id or default_user_id()
+    with connect() as conn:
+        # Note: Manual automation log cleanup is scoped to the active profile and current user.
+        cur = conn.execute('DELETE FROM automation_history WHERE user_id=? AND profile_id=?', (user_id, profile_id))
+        return int(cur.rowcount or 0)
+
+
 def _condition_true(t: dict[str, Any], cond: dict[str, Any]) -> bool:
     typ = str(cond.get('type') or '')
     if typ == 'completed': return bool(int(t.get('complete') or 0))
```
```diff
@@ -111,8 +123,11 @@ def _conditions_match(conn, rule: dict[str, Any], profile_id: int, t: dict[str,
     if not h: return False
     immediate_ok = True; delayed_ok = True; now = utcnow(); now_ts = _now_ts()
     for cond in rule.get('conditions') or []:
-        ok = _condition_true(t, cond)
-        if cond.get('type') == 'no_seeds' and int(cond.get('minutes') or 0) > 0:
+        raw_ok = _condition_true(t, cond)
+        negated = bool(cond.get('negate'))
+        # Note: Negation is applied in the backend, so UI and API only store the condition flag.
+        ok = (not raw_ok) if negated else raw_ok
+        if cond.get('type') == 'no_seeds' and int(cond.get('minutes') or 0) > 0 and not negated:
             row = conn.execute('SELECT condition_since_at FROM automation_rule_state WHERE rule_id=? AND profile_id=? AND torrent_hash=?', (rule['id'], profile_id, h)).fetchone()
             if ok:
                 since = row['condition_since_at'] if row and row.get('condition_since_at') else now
```
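The new `negate` flag is easiest to see on the `completed` condition, whose truth test appears verbatim in the context above:

```python
# A condition dict as the UI would store it; 'negate' flips the result backend-side only.
cond = {'type': 'completed', 'negate': True}
t = {'complete': 1}

raw_ok = bool(int(t.get('complete') or 0))   # what _condition_true returns for 'completed'
negated = bool(cond.get('negate'))
ok = (not raw_ok) if negated else raw_ok     # mirrors the hunk above
assert ok is False  # "NOT completed" fails for a finished torrent
```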
```diff
@@ -125,33 +140,143 @@ def _conditions_match(conn, rule: dict[str, Any], profile_id: int, t: dict[str,
     return immediate_ok and delayed_ok
 
 
-def _cooldown_ok(conn, rule: dict[str, Any], profile_id: int, torrent_hash: str) -> bool:
+def _cooldown_ok(conn, rule: dict[str, Any], profile_id: int, torrent_hash: str = '__rule__') -> bool:
     cooldown = int(rule.get('cooldown_minutes') or 0)
+    if cooldown <= 0: return True
     row = conn.execute('SELECT last_applied_at FROM automation_rule_state WHERE rule_id=? AND profile_id=? AND torrent_hash=?', (rule['id'], profile_id, torrent_hash)).fetchone()
     if not row or not row.get('last_applied_at'): return True
     return _now_ts() - _ts(row['last_applied_at']) >= cooldown * 60
 
 
-def _apply_effects(c: Any, profile: dict[str, Any], torrent: dict[str, Any], effects: list[dict[str, Any]]) -> list[dict[str, Any]]:
-    h = str(torrent.get('hash') or ''); labels = _label_names(torrent.get('label')); applied = []
+def _mark_rule_cooldown(conn, rule: dict[str, Any], profile_id: int, now: str) -> None:
+    # Note: Cooldown is rule-level, so one batch execution blocks the whole automation until the cooldown expires.
+    conn.execute('INSERT INTO automation_rule_state(rule_id,profile_id,torrent_hash,last_applied_at,updated_at) VALUES(?,?,?,?,?) ON CONFLICT(rule_id,profile_id,torrent_hash) DO UPDATE SET last_applied_at=excluded.last_applied_at, updated_at=excluded.updated_at', (rule['id'], profile_id, '__rule__', now, now))
+
+
+def _chunk_hashes(hashes: list[str], size: int = AUTOMATION_JOB_CHUNK_SIZE) -> list[list[str]]:
+    # Note: Automation jobs use the same small-batch idea as manual bulk jobs, so long move/remove/actions remain visible and recoverable.
+    safe_size = max(1, int(size or AUTOMATION_JOB_CHUNK_SIZE))
+    return [hashes[index:index + safe_size] for index in range(0, len(hashes), safe_size)]
+
+
+def _job_context(rule: dict[str, Any], eff_type: str, hashes: list[str], torrents_by_hash: dict[str, dict[str, Any]], extra: dict[str, Any] | None = None) -> dict[str, Any]:
+    # Note: Job context marks jobs created by automations, making the Jobs log explain what rule queued the work.
+    ctx = {
+        'source': 'automation',
+        'rule_id': rule.get('id'),
+        'rule_name': str(rule.get('name') or ''),
+        'effect': eff_type,
+        'bulk': len(hashes) > 1,
+        'hash_count': len(hashes),
+        'requested_at': utcnow(),
+        'items': [
+            {
+                'hash': h,
+                'name': str((torrents_by_hash.get(h) or {}).get('name') or ''),
+                'path': str((torrents_by_hash.get(h) or {}).get('path') or ''),
+            }
+            for h in hashes
+        ],
+    }
+    if extra:
+        ctx.update(extra)
+    return ctx
+
+
+def _enqueue_automation_job(profile: dict[str, Any], rule: dict[str, Any], action_name: str, hashes: list[str], payload: dict[str, Any], torrents_by_hash: dict[str, dict[str, Any]], user_id: int | None = None, context_extra: dict[str, Any] | None = None) -> list[str]:
+    # Note: Every automation side effect is queued as a normal job instead of running inline, so it appears in Jobs and uses worker retries/ordering.
+    job_ids: list[str] = []
+    chunks = _chunk_hashes(hashes)
+    for index, chunk in enumerate(chunks, start=1):
+        part_payload = dict(payload or {})
+        part_payload['hashes'] = chunk
+        part_payload['automation_ordered'] = True
+        extra = dict(context_extra or {})
+        if len(chunks) > 1:
+            extra.update({'bulk_label': f'automation-{index}', 'bulk_part': index, 'bulk_parts': len(chunks), 'parent_hash_count': len(hashes)})
+        if action_name == 'move':
+            extra.update({'target_path': str(part_payload.get('path') or ''), 'move_data': bool(part_payload.get('move_data'))})
+        if action_name == 'remove':
+            extra.update({'remove_data': bool(part_payload.get('remove_data'))})
+        part_payload['job_context'] = _job_context(rule, str(context_extra.get('effect_type') if context_extra else action_name), chunk, torrents_by_hash, extra)
+        job_ids.append(enqueue(action_name, int(profile['id']), part_payload, user_id=user_id))
+    return job_ids
+
+
+def _apply_effects_bulk(c: Any, profile: dict[str, Any], torrents: list[dict[str, Any]], effects: list[dict[str, Any]], rule: dict[str, Any], user_id: int | None = None) -> list[dict[str, Any]]:
+    hashes = [str(t.get('hash') or '') for t in torrents if str(t.get('hash') or '')]
+    torrents_by_hash = {str(t.get('hash') or ''): t for t in torrents if str(t.get('hash') or '')}
+    labels_by_hash = {str(t.get('hash') or ''): _label_names(t.get('label')) for t in torrents}
+    applied: list[dict[str, Any]] = []
+    if not hashes: return applied
     for eff in effects:
         typ = str(eff.get('type') or '')
         if typ == 'move':
-            # Note: Automation move-to-path now uses the same move implementation as the main app action.
             path = str(eff.get('path') or '').strip() or rtorrent.default_download_path(profile)
-            move_payload = {'path': path, 'move_data': bool(eff.get('move_data')), 'recheck': bool(eff.get('recheck', eff.get('move_data'))), 'keep_seeding': bool(eff.get('keep_seeding'))}
-            result = rtorrent.move_torrents(profile, [h], move_payload) if path else None
-            if path: applied.append({'type': 'move', 'path': path, 'move_data': bool(eff.get('move_data')), 'recheck': bool(move_payload['recheck']), 'keep_seeding': bool(eff.get('keep_seeding')), 'result': result})
+            payload = {
+                'path': path,
+                'move_data': bool(eff.get('move_data')),
+                'recheck': bool(eff.get('recheck', eff.get('move_data'))),
+                'keep_seeding': bool(eff.get('keep_seeding')),
+            }
+            job_ids = _enqueue_automation_job(profile, rule, 'move', hashes, payload, torrents_by_hash, user_id, {'effect_type': 'move'})
+            applied.append({'type': 'move', 'path': path, 'count': len(hashes), 'target_hashes': hashes, 'move_data': payload['move_data'], 'recheck': payload['recheck'], 'keep_seeding': payload['keep_seeding'], 'job_ids': job_ids})
         elif typ == 'add_label':
             label = str(eff.get('label') or '').strip()
-            if label and label not in labels: labels.append(label); c.call('d.custom1.set', h, _label_value(labels))
-            if label: applied.append({'type': 'add_label', 'label': label})
+            if label:
+                # Note: Add-label automations are idempotent and queue only torrents that need a changed label value.
+                grouped: dict[str, list[str]] = {}
+                for h in hashes:
+                    labels = labels_by_hash.get(h, [])
+                    if label in labels:
+                        continue
+                    new_labels = list(labels) + [label]
+                    value = _label_value(new_labels)
+                    labels_by_hash[h] = _label_names(value)
+                    grouped.setdefault(value, []).append(h)
+                target_hashes = [h for group in grouped.values() for h in group]
+                job_ids: list[str] = []
+                for value, group_hashes in grouped.items():
+                    job_ids.extend(_enqueue_automation_job(profile, rule, 'set_label', group_hashes, {'label': value}, torrents_by_hash, user_id, {'effect_type': 'add_label', 'label': label}))
+                if target_hashes:
+                    applied.append({'type': 'add_label', 'label': label, 'count': len(target_hashes), 'target_hashes': target_hashes, 'job_ids': job_ids})
         elif typ == 'remove_label':
-            label = str(eff.get('label') or '').strip(); labels = [x for x in labels if x != label]; c.call('d.custom1.set', h, _label_value(labels)); applied.append({'type': 'remove_label', 'label': label})
+            label = str(eff.get('label') or '').strip()
+            if label:
+                # Note: Remove-label automations are queued only for torrents where the requested label exists.
+                grouped: dict[str, list[str]] = {}
+                for h in hashes:
+                    labels = labels_by_hash.get(h, [])
+                    if label not in labels:
+                        continue
+                    value = _label_value([x for x in labels if x != label])
+                    labels_by_hash[h] = _label_names(value)
+                    grouped.setdefault(value, []).append(h)
+                target_hashes = [h for group in grouped.values() for h in group]
+                job_ids: list[str] = []
+                for value, group_hashes in grouped.items():
+                    job_ids.extend(_enqueue_automation_job(profile, rule, 'set_label', group_hashes, {'label': value}, torrents_by_hash, user_id, {'effect_type': 'remove_label', 'label': label}))
+                if target_hashes:
+                    applied.append({'type': 'remove_label', 'label': label, 'count': len(target_hashes), 'target_hashes': target_hashes, 'job_ids': job_ids})
         elif typ == 'set_labels':
-            value = _label_value(_label_names(eff.get('labels'))); c.call('d.custom1.set', h, value); labels = _label_names(value); applied.append({'type': 'set_labels', 'labels': value})
-        elif typ in {'pause', 'stop', 'start', 'resume', 'recheck'}:
-            method = {'pause':'d.pause','stop':'d.stop','start':'d.start','resume':'d.resume','recheck':'d.check_hash'}[typ]; c.call(method, h); applied.append({'type': typ})
+            value = _label_value(_label_names(eff.get('labels')))
+            target_labels = _label_names(value)
+            # Note: Set-labels queues a job only if the current labels differ from the requested exact list.
+            target_hashes = [h for h in hashes if labels_by_hash.get(h, []) != target_labels]
+            for h in target_hashes:
+                labels_by_hash[h] = list(target_labels)
+            if target_hashes:
+                job_ids = _enqueue_automation_job(profile, rule, 'set_label', target_hashes, {'label': value}, torrents_by_hash, user_id, {'effect_type': 'set_labels', 'labels': value})
+                applied.append({'type': 'set_labels', 'labels': value, 'count': len(target_hashes), 'target_hashes': target_hashes, 'job_ids': job_ids})
+        elif typ in {'pause', 'stop', 'start', 'resume', 'recheck', 'reannounce'}:
+            # Note: Runtime actions are queued as jobs too, so automation activity is visible in the Jobs panel.
+            job_ids = _enqueue_automation_job(profile, rule, typ, hashes, {}, torrents_by_hash, user_id, {'effect_type': typ})
+            applied.append({'type': typ, 'count': len(hashes), 'target_hashes': hashes, 'job_ids': job_ids})
+        elif typ == 'remove':
+            # Note: Remove is supported for automation payloads and still goes through ordered worker jobs.
+            payload = {'remove_data': bool(eff.get('remove_data'))}
+            job_ids = _enqueue_automation_job(profile, rule, 'remove', hashes, payload, torrents_by_hash, user_id, {'effect_type': 'remove'})
+            applied.append({'type': 'remove', 'count': len(hashes), 'target_hashes': hashes, 'remove_data': payload['remove_data'], 'job_ids': job_ids})
     return applied
 
 
```
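`_chunk_hashes` is pure slicing, so its contract is easy to pin down; this spot check just re-runs the function from the hunk above:

```python
AUTOMATION_JOB_CHUNK_SIZE = 100

def _chunk_hashes(hashes: list[str], size: int = AUTOMATION_JOB_CHUNK_SIZE) -> list[list[str]]:
    safe_size = max(1, int(size or AUTOMATION_JOB_CHUNK_SIZE))
    return [hashes[index:index + safe_size] for index in range(0, len(hashes), safe_size)]

hashes = [f'h{i}' for i in range(250)]
assert [len(p) for p in _chunk_hashes(hashes)] == [100, 100, 50]  # 250 hashes -> 3 jobs
assert _chunk_hashes([]) == []                                    # no hashes, no jobs
assert _chunk_hashes(['a'], 0) == [['a']]                         # size=0 falls back to the default
```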
```diff
@@ -160,17 +285,44 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
     if not profile: return {'ok': False, 'error': 'No active rTorrent profile'}
     user_id = user_id or default_user_id(); profile_id = int(profile['id'])
     rules = [r for r in list_rules(profile_id, user_id) if force or int(r.get('enabled') or 0)]
-    if not rules: return {'ok': True, 'checked': 0, 'applied': [], 'rules': 0}
-    torrents = rtorrent.list_torrents(profile); c = rtorrent.client_for(profile); applied = []; now = utcnow()
+    if not rules: return {'ok': True, 'checked': 0, 'applied': [], 'batches': [], 'rules': 0}
+    torrents = rtorrent.list_torrents(profile); applied = []; batches = []; now = utcnow()
+    planned: list[dict[str, Any]] = []
     with connect() as conn:
         for rule in rules:
-            for t in torrents:
-                h = str(t.get('hash') or '')
-                if not _conditions_match(conn, rule, profile_id, t): continue
-                if not force and not _cooldown_ok(conn, rule, profile_id, h): continue
-                try: actions = _apply_effects(c, profile, t, rule.get('effects') or [])
-                except Exception as exc: actions = [{'error': str(exc)}]
+            # Note: This pass only matches rules and updates condition timers; job creation is intentionally delayed until after this DB transaction commits.
+            if not force and not _cooldown_ok(conn, rule, profile_id):
+                continue
+            matched = [t for t in torrents if _conditions_match(conn, rule, profile_id, t)]
+            if not matched:
+                continue
+            hashes = [str(t.get('hash') or '') for t in matched if str(t.get('hash') or '')]
+            if hashes:
+                planned.append({'rule': rule, 'matched': matched, 'hashes': hashes})
+    for item in planned:
+        rule = item['rule']
+        matched = item['matched']
+        hashes = item['hashes']
+        # Note: Automation jobs are enqueued outside the rule-state transaction, preventing SQLite self-locks when enqueue() writes to jobs.
+        try:
+            actions = _apply_effects_bulk(None, profile, matched, rule.get('effects') or [], rule, user_id)
+        except Exception as exc:
+            actions = [{'error': str(exc), 'count': len(hashes), 'target_hashes': hashes}]
+        changed_hashes = sorted({h for a in actions for h in (a.get('target_hashes') or [])})
+        if not actions or not changed_hashes:
+            # Note: Matching torrents with no real action are not logged and do not restart the cooldown.
+            continue
+        history_actions = [{k: v for k, v in a.items() if k != 'target_hashes'} for a in actions]
+        matched_by_hash = {str(t.get('hash') or ''): t for t in matched}
+        with connect() as conn:
+            # Note: State/history writes happen after enqueue succeeds, so failed job creation does not create misleading automation history.
+            for h in changed_hashes:
+                t = matched_by_hash.get(h, {})
                 conn.execute('INSERT INTO automation_rule_state(rule_id,profile_id,torrent_hash,last_matched_at,last_applied_at,updated_at) VALUES(?,?,?,?,?,?) ON CONFLICT(rule_id,profile_id,torrent_hash) DO UPDATE SET last_matched_at=excluded.last_matched_at, last_applied_at=excluded.last_applied_at, updated_at=excluded.updated_at', (rule['id'], profile_id, h, now, now, now))
-                conn.execute('INSERT INTO automation_history(user_id,profile_id,rule_id,torrent_hash,torrent_name,rule_name,actions_json,created_at) VALUES(?,?,?,?,?,?,?,?)', (user_id, profile_id, rule['id'], h, str(t.get('name') or ''), str(rule.get('name') or ''), json.dumps(actions), now))
-                applied.append({'rule_id': rule['id'], 'rule_name': rule.get('name'), 'hash': h, 'name': t.get('name'), 'actions': actions})
-    return {'ok': True, 'checked': len(torrents), 'rules': len(rules), 'applied': applied}
+                applied.append({'rule_id': rule['id'], 'rule_name': rule.get('name'), 'hash': h, 'name': t.get('name'), 'actions': [{'type': a.get('type', 'error'), 'count': a.get('count', len(changed_hashes))} for a in actions]})
+            _mark_rule_cooldown(conn, rule, profile_id, now)
+            torrent_name = str(matched_by_hash.get(changed_hashes[0], {}).get('name') or '') if len(changed_hashes) == 1 else f'{len(changed_hashes)} torrents'
+            torrent_hash = changed_hashes[0] if len(changed_hashes) == 1 else f'batch:{rule["id"]}:{now}'
+            conn.execute('INSERT INTO automation_history(user_id,profile_id,rule_id,torrent_hash,torrent_name,rule_name,actions_json,created_at) VALUES(?,?,?,?,?,?,?,?)', (user_id, profile_id, rule['id'], torrent_hash, torrent_name, str(rule.get('name') or ''), json.dumps(history_actions), now))
+            batches.append({'rule_id': rule['id'], 'rule_name': rule.get('name'), 'count': len(changed_hashes), 'actions': history_actions})
+    return {'ok': True, 'checked': len(torrents), 'rules': len(rules), 'applied': applied, 'batches': batches}
```
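The match-then-enqueue split above is the classic SQLite two-phase shape: plan inside one short transaction, then perform side effects with no transaction held open. A generic sketch of the pattern, with placeholder names that are not this repo's:

```python
import sqlite3

def run_two_phase(db_path: str) -> None:
    # Phase 1: read and plan inside one short transaction (placeholder schema).
    with sqlite3.connect(db_path) as conn:
        planned = [row[0] for row in conn.execute("SELECT id FROM rules")]
    # Phase 2: side effects run with no transaction held, so a worker that
    # writes to the same database (like enqueue() above) cannot deadlock us.
    for rule_id in planned:
        enqueue_stub(rule_id)

def enqueue_stub(rule_id: int) -> None:
    print(f"would enqueue jobs for rule {rule_id}")
```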
```diff
@@ -30,6 +30,8 @@ def cleanup(force: bool = False) -> dict[str, int]:
     targets = {
         "traffic_history": ("created_at", TRAFFIC_HISTORY_RETENTION_DAYS),
         "smart_queue_history": ("created_at", SMART_QUEUE_HISTORY_RETENTION_DAYS),
+        # Note: Automation history follows Smart Queue retention; rules and rule state are never deleted here.
+        "automation_history": ("created_at", SMART_QUEUE_HISTORY_RETENTION_DAYS),
         "jobs": ("updated_at", JOBS_RETENTION_DAYS),
         "logs": ("created_at", LOG_RETENTION_DAYS),
     }
```
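The `(column, days)` tuples imply an age-based delete per table; the consumer of `targets` is not shown in this diff, so the following is only a plausible reading, not the project's actual code:

```python
# Hypothetical consumer of the targets mapping above. Table and column names
# come from the trusted in-code dict, never from user input.
def _purge(conn, table: str, column: str, days: int) -> int:
    cur = conn.execute(
        f"DELETE FROM {table} WHERE {column} < datetime('now', ?)",
        (f"-{int(days)} days",),
    )
    return int(cur.rowcount or 0)
```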
```diff
@@ -1274,73 +1274,6 @@ def start_or_resume_hash(c: ScgiRtorrentClient, torrent_hash: str) -> dict:
     result['ok'] = result.get('ok', True)
     return result
 
 
-def move_torrents(profile: dict, torrent_hashes: list[str], payload: dict | None = None) -> dict:
-    # Note: Shared move implementation keeps API move and automation move-to-path identical.
-    payload = payload or {}
-    c = client_for(profile)
-    path = _remote_clean_path(payload.get("path") or "")
-    move_data = bool(payload.get("move_data"))
-    recheck = bool(payload.get("recheck", move_data))
-    keep_seeding = bool(payload.get("keep_seeding"))
-    # Note: keep_seeding lets automation move completed data to another path and force the torrent back into seeding.
-    if not path:
-        raise ValueError("Missing path")
-    results = []
-    if move_data:
-        _rt_execute_allow_timeout(c, "execute.throw", "mkdir", "-p", path)
-    for h in torrent_hashes:
-        item = {"hash": h, "path": path, "move_data": move_data, "keep_seeding": keep_seeding}
-        try:
-            was_state = int(c.call("d.state", h) or 0)
-        except Exception:
-            was_state = 0
-        try:
-            was_active = int(c.call("d.is_active", h) or 0)
-        except Exception:
-            was_active = was_state
-        if move_data:
-            src = _remote_clean_path(_torrent_data_path(c, h))
-            if not src:
-                raise ValueError(f"Cannot determine source path for {h}")
-            dst = _remote_join(path, posixpath.basename(src.rstrip("/")))
-            if src != dst:
-                try:
-                    c.call("d.stop", h)
-                except Exception:
-                    pass
-                try:
-                    c.call("d.close", h)
-                except Exception:
-                    pass
-                _run_remote_move(c, src, dst)
-                item["moved_from"] = src
-                item["moved_to"] = dst
-            else:
-                item["skipped"] = "source and destination are the same"
-            c.call("d.directory.set", h, path)
-            if recheck:
-                try:
-                    c.call("d.check_hash", h)
-                except Exception as exc:
-                    item["recheck_error"] = str(exc)
-            if keep_seeding or was_state or was_active:
-                try:
-                    c.call("d.start", h)
-                    item["started_after_move"] = True
-                except Exception as exc:
-                    item["start_error"] = str(exc)
-        else:
-            c.call("d.directory.set", h, path)
-            if keep_seeding:
-                try:
-                    c.call("d.start", h)
-                    item["started_after_path_change"] = True
-                except Exception as exc:
-                    item["start_error"] = str(exc)
-        results.append(item)
-    return {"ok": True, "count": len(torrent_hashes), "move_data": move_data, "keep_seeding": keep_seeding, "results": results}
-
-
 def action(profile: dict, torrent_hashes: list[str], name: str, payload: dict | None = None) -> dict:
     payload = payload or {}
     c = client_for(profile)
```
```diff
@@ -1361,8 +1294,61 @@ def action(profile: dict, torrent_hashes: list[str], name: str, payload: dict |
         c.call("d.custom.set", h, "py_ratio_group", group)
         return {"ok": True, "count": len(torrent_hashes), "ratio_group": group}
     if name == "move":
-        # Note: Main move delegates to the shared helper used by automations.
-        return move_torrents(profile, torrent_hashes, payload)
+        path = _remote_clean_path(payload.get("path") or "")
+        move_data = bool(payload.get("move_data"))
+        recheck = bool(payload.get("recheck", move_data))
+        keep_seeding = bool(payload.get("keep_seeding"))
+        # Note: Automations can force seeding after a physical move even if the torrent was not active before.
+        if not path:
+            raise ValueError("Missing path")
+        results = []
+        if move_data:
+            _rt_execute_allow_timeout(c, "execute.throw", "mkdir", "-p", path)
+        for h in torrent_hashes:
+            item = {"hash": h, "path": path, "move_data": move_data, "keep_seeding": keep_seeding}
+            try:
+                was_state = int(c.call("d.state", h) or 0)
+            except Exception:
+                was_state = 0
+            try:
+                was_active = int(c.call("d.is_active", h) or 0)
+            except Exception:
+                was_active = was_state
+            if move_data:
+                src = _remote_clean_path(_torrent_data_path(c, h))
+                if not src:
+                    raise ValueError(f"Cannot determine source path for {h}")
+                dst = _remote_join(path, posixpath.basename(src.rstrip("/")))
+                if src != dst:
+                    try:
+                        c.call("d.stop", h)
+                    except Exception:
+                        pass
+                    try:
+                        c.call("d.close", h)
+                    except Exception:
+                        pass
+                    _run_remote_move(c, src, dst)
+                    item["moved_from"] = src
+                    item["moved_to"] = dst
+                else:
+                    item["skipped"] = "source and destination are the same"
+                c.call("d.directory.set", h, path)
+                if recheck:
+                    try:
+                        c.call("d.check_hash", h)
+                    except Exception as exc:
+                        item["recheck_error"] = str(exc)
+                if keep_seeding or was_state or was_active:
+                    try:
+                        c.call("d.start", h)
+                        item["started_after_move"] = True
+                    except Exception as exc:
+                        item["start_after_move_error"] = str(exc)
+            else:
+                c.call("d.directory.set", h, path)
+            results.append(item)
+        return {"ok": True, "count": len(torrent_hashes), "move_data": move_data, "keep_seeding": keep_seeding, "results": results}
     if name == "pause":
         # Note: The app pause action is now a pure d.pause so later resume works without stop/start.
         results = [pause_hash(c, h) for h in torrent_hashes]
```
```diff
@@ -5,7 +5,7 @@ from typing import Any
 import json
 import time
 
-from ..config import SMART_QUEUE_LABEL
+from ..config import SMART_QUEUE_LABEL, SMART_QUEUE_STALLED_LABEL
 from ..db import connect, default_user_id, utcnow
 from . import rtorrent
 from .preferences import active_profile, get_profile
```
```diff
@@ -20,6 +20,14 @@ def _ts(value: str | None) -> float:
     return 0.0
 
 
+def _int_setting(data: dict[str, Any], current: dict[str, Any], key: str, default: int, minimum: int = 0) -> int:
+    raw = data.get(key) if key in data else current.get(key)
+    try:
+        return max(minimum, int(raw if raw is not None and raw != '' else default))
+    except (TypeError, ValueError):
+        return max(minimum, int(default))
+
+
 def _default_settings(user_id: int, profile_id: int) -> dict[str, Any]:
     return {
         'user_id': user_id,
```
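`_int_setting` closes a real gap in the old `or`-chains in `save_settings`, where an explicit 0 sent by the client was indistinguishable from a missing key. Spot checks, re-running the helper from the hunk above:

```python
from typing import Any

def _int_setting(data: dict[str, Any], current: dict[str, Any], key: str, default: int, minimum: int = 0) -> int:
    raw = data.get(key) if key in data else current.get(key)
    try:
        return max(minimum, int(raw if raw is not None and raw != '' else default))
    except (TypeError, ValueError):
        return max(minimum, int(default))

assert _int_setting({'min_speed_bytes': 0}, {'min_speed_bytes': 1024}, 'min_speed_bytes', 0) == 0  # explicit 0 now sticks
assert _int_setting({'min_seeds': ''}, {'min_seeds': 3}, 'min_seeds', 0) == 0                      # empty string -> default
assert _int_setting({}, {'stalled_seconds': 120}, 'stalled_seconds', 300, 30) == 120               # falls back to stored value
assert _int_setting({'min_peers': 'abc'}, {}, 'min_peers', 0) == 0                                 # junk -> default, clamped
```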
```diff
@@ -29,6 +37,7 @@ def _default_settings(user_id: int, profile_id: int) -> dict[str, Any]:
         'stalled_seconds': 300,
         'min_speed_bytes': 1024,
         'min_seeds': 1,
+        'min_peers': 0,
         'manage_stopped': 0,
         'updated_at': utcnow(),
     }
```
```diff
@@ -49,27 +58,30 @@ def save_settings(profile_id: int, data: dict[str, Any], user_id: int | None = N
     current = get_settings(profile_id, user_id)
     settings = {
         'enabled': 1 if data.get('enabled', current.get('enabled')) else 0,
-        'max_active_downloads': max(1, int(data.get('max_active_downloads') or current.get('max_active_downloads') or 5)),
-        'stalled_seconds': max(30, int(data.get('stalled_seconds') or current.get('stalled_seconds') or 300)),
-        'min_speed_bytes': max(0, int(data.get('min_speed_bytes') or current.get('min_speed_bytes') or 0)),
-        'min_seeds': max(0, int(data.get('min_seeds') or current.get('min_seeds') or 0)),
+        'max_active_downloads': _int_setting(data, current, 'max_active_downloads', 5, 1),
+        'stalled_seconds': _int_setting(data, current, 'stalled_seconds', 300, 30),
+        'min_speed_bytes': _int_setting(data, current, 'min_speed_bytes', 0, 0),
+        'min_seeds': _int_setting(data, current, 'min_seeds', 0, 0),
+        # Note: Min peers is optional; when set, stalled detection requires low speed, low seeds and low peers.
+        'min_peers': _int_setting(data, current, 'min_peers', 0, 0),
         # Note: This switch protects fully stopped torrents from automatic starts; by default Smart Queue manages only paused items.
         'manage_stopped': 1 if data.get('manage_stopped', current.get('manage_stopped')) else 0,
     }
     now = utcnow()
     with connect() as conn:
         conn.execute(
-            '''INSERT INTO smart_queue_settings(user_id,profile_id,enabled,max_active_downloads,stalled_seconds,min_speed_bytes,min_seeds,manage_stopped,updated_at)
-            VALUES(?,?,?,?,?,?,?,?,?)
+            '''INSERT INTO smart_queue_settings(user_id,profile_id,enabled,max_active_downloads,stalled_seconds,min_speed_bytes,min_seeds,min_peers,manage_stopped,updated_at)
+            VALUES(?,?,?,?,?,?,?,?,?,?)
             ON CONFLICT(user_id, profile_id) DO UPDATE SET
                 enabled=excluded.enabled,
                 max_active_downloads=excluded.max_active_downloads,
                 stalled_seconds=excluded.stalled_seconds,
                 min_speed_bytes=excluded.min_speed_bytes,
                 min_seeds=excluded.min_seeds,
+                min_peers=excluded.min_peers,
                 manage_stopped=excluded.manage_stopped,
                 updated_at=excluded.updated_at''',
-            (user_id, profile_id, settings['enabled'], settings['max_active_downloads'], settings['stalled_seconds'], settings['min_speed_bytes'], settings['min_seeds'], settings['manage_stopped'], now),
+            (user_id, profile_id, settings['enabled'], settings['max_active_downloads'], settings['stalled_seconds'], settings['min_speed_bytes'], settings['min_seeds'], settings['min_peers'], settings['manage_stopped'], now),
         )
     return get_settings(profile_id, user_id)
```
```diff
@@ -132,6 +144,60 @@ def _excluded_hashes(profile_id: int, user_id: int) -> set[str]:
     return {r['torrent_hash'] for r in list_exclusions(profile_id, user_id)}
 
 
+def _label_names(value: str | None) -> list[str]:
+    names: list[str] = []
+    for part in str(value or '').replace(';', ',').replace('|', ',').split(','):
+        label = part.strip()
+        if label and label not in names:
+            names.append(label)
+    return names
+
+
+def _label_value(labels: list[str]) -> str:
+    output: list[str] = []
+    for label in labels:
+        item = str(label or '').strip()
+        if item and item not in output:
+            output.append(item)
+    return ', '.join(output)
+
+
+def _has_smart_queue_label(value: str | None) -> bool:
+    return SMART_QUEUE_LABEL in _label_names(value)
+
+
+def _without_smart_queue_label(value: str | None) -> str:
+    return _label_value([label for label in _label_names(value) if label != SMART_QUEUE_LABEL])
+
+
+def _has_stalled_label(value: str | None) -> bool:
+    # Note: Stalled is treated case-insensitively so manually edited labels still block Smart Queue.
+    target = SMART_QUEUE_STALLED_LABEL.casefold()
+    return any(label.casefold() == target for label in _label_names(value))
+
+
+def _without_queue_technical_labels(value: str | None) -> str:
+    return _label_value([label for label in _label_names(value) if label != SMART_QUEUE_LABEL])
+
+
+def _ensure_stalled_label(client: Any, torrent_hash: str, current_label: str = '') -> bool:
+    labels = [label for label in _label_names(current_label) if label != SMART_QUEUE_LABEL]
+    changed = False
+    if not any(label.casefold() == SMART_QUEUE_STALLED_LABEL.casefold() for label in labels):
+        labels.append(SMART_QUEUE_STALLED_LABEL)
+        changed = True
+    if SMART_QUEUE_LABEL in _label_names(current_label):
+        changed = True
+    if not changed:
+        return True
+    try:
+        # Note: Stalled marking is idempotent; it adds Stalled and removes only the Smart Queue technical marker.
+        client.call('d.custom1.set', torrent_hash, _label_value(labels))
+        return True
+    except Exception:
+        return False
+
+
 def _remember_auto_label(profile_id: int, torrent_hash: str, previous_label: str) -> None:
     now = utcnow()
     with connect() as conn:
```
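The new label helpers normalize rTorrent's single custom1 string into a de-duplicated list and back, accepting `,`, `;` and `|` as separators. A round trip, re-running the two functions from the hunk above:

```python
SMART_QUEUE_LABEL = 'Smart Queue Paused'

def _label_names(value):
    names = []
    for part in str(value or '').replace(';', ',').replace('|', ',').split(','):
        label = part.strip()
        if label and label not in names:
            names.append(label)
    return names

def _label_value(labels):
    output = []
    for label in labels:
        item = str(label or '').strip()
        if item and item not in output:
            output.append(item)
    return ', '.join(output)

assert _label_names('Movies; Smart Queue Paused|HD') == ['Movies', 'Smart Queue Paused', 'HD']
# Stripping only the technical marker keeps the user's labels intact:
assert _label_value([l for l in _label_names('Movies; Smart Queue Paused|HD') if l != SMART_QUEUE_LABEL]) == 'Movies, HD'
```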
```diff
@@ -166,19 +232,18 @@ def _restore_auto_label(client: Any, profile_id: int, torrent_hash: str, current
     ).fetchone()
     live_label = _read_label(client, torrent_hash, current_label or '')
     if not row:
-        if live_label != SMART_QUEUE_LABEL:
+        if not _has_smart_queue_label(live_label):
             return False
         try:
-            # Note: Clear the Smart Queue label even when the torrent was marked earlier but no previous-label entry remains.
-            client.call('d.custom1.set', torrent_hash, '')
+            # Note: Remove only the Smart Queue technical label and keep every user label untouched.
+            client.call('d.custom1.set', torrent_hash, _without_smart_queue_label(live_label))
             return True
         except Exception:
             return False
-    previous = row.get('previous_label') or ''
     try:
-        # Note: On resume, Smart Queue restores the previous label only while it still sees its own technical label.
-        if live_label == SMART_QUEUE_LABEL or current_label is None:
-            client.call('d.custom1.set', torrent_hash, previous)
+        # Note: Starting a torrent removes only Smart Queue's technical marker, so labels added while paused stay untouched.
+        if _has_smart_queue_label(live_label):
+            client.call('d.custom1.set', torrent_hash, _without_smart_queue_label(live_label))
         conn.execute('DELETE FROM smart_queue_auto_labels WHERE profile_id=? AND torrent_hash=?', (profile_id, torrent_hash))
         return True
     except Exception:
```
```diff
@@ -282,10 +347,16 @@ def _read_live_start_state(client: Any, torrent_hash: str) -> dict[str, Any]:
         result['started'] = bool(int(result.get('active') or 0))
     return result
 
-def _set_smart_queue_label(client: Any, torrent_hash: str, attempts: int = 3) -> bool:
+def _set_smart_queue_label(client: Any, torrent_hash: str, current_label: str = '', attempts: int = 3) -> bool:
+    labels = _label_names(current_label)
+    if SMART_QUEUE_LABEL in labels:
+        return True
+    labels.append(SMART_QUEUE_LABEL)
+    value = _label_value(labels)
     for attempt in range(max(1, attempts)):
         try:
-            client.call('d.custom1.set', torrent_hash, SMART_QUEUE_LABEL)
+            # Note: Smart Queue appends its technical label instead of overwriting existing torrent labels.
+            client.call('d.custom1.set', torrent_hash, value)
             return True
         except Exception:
             if attempt < attempts - 1:
```
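The reworked `_set_smart_queue_label` appends the marker instead of clobbering custom1, and returns early when it is already present. Walking the same steps by hand, reusing `_label_names`/`_label_value` from the sketch above:

```python
# Reuses _label_names/_label_value and SMART_QUEUE_LABEL from the sketch above.
current = 'Linux ISOs'
labels = _label_names(current)
if SMART_QUEUE_LABEL not in labels:
    labels.append(SMART_QUEUE_LABEL)
value = _label_value(labels)
assert value == 'Linux ISOs, Smart Queue Paused'  # the user label survives the hold
# A second pass is a no-op: the early-return branch fires before any XML-RPC call.
assert SMART_QUEUE_LABEL in _label_names(value)
```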
```diff
@@ -298,15 +369,17 @@ def _mark_auto_paused(client: Any, profile_id: int, torrent: dict[str, Any]) ->
     if not torrent_hash:
         return False
     previous = str(torrent.get('label') or '')
-    if previous != SMART_QUEUE_LABEL:
+    if not _has_smart_queue_label(previous):
         _remember_auto_label(profile_id, torrent_hash, previous)
-    return _set_smart_queue_label(client, torrent_hash)
+    return _set_smart_queue_label(client, torrent_hash, previous)
 
 
 def _is_smart_queue_hold(torrent: dict[str, Any] | None, manage_stopped: bool = True) -> bool:
     if not torrent or int(torrent.get('complete') or 0):
         return False
-    if str(torrent.get('label') or '') == SMART_QUEUE_LABEL:
+    if _has_stalled_label(str(torrent.get('label') or '')):
+        return False
+    if _has_smart_queue_label(str(torrent.get('label') or '')):
         return True
     # Note: Paused in rTorrent usually has state=1 and active=0, so state=0 must not be required.
     # This lets Smart Queue treat paused torrents as pending and fill the queue target later.
```
```diff
@@ -319,11 +392,11 @@ def _is_smart_queue_hold(torrent: dict[str, Any] | None, manage_stopped: bool =
 
 
 def _clear_untracked_smart_queue_label(client: Any, torrent_hash: str, current_label: str) -> bool:
-    if current_label != SMART_QUEUE_LABEL:
+    if not _has_smart_queue_label(current_label):
         return False
     try:
-        # Note: Clear an orphaned Smart Queue label when no previous-label entry exists in the database.
-        client.call('d.custom1.set', torrent_hash, '')
+        # Note: Clear only the orphaned Smart Queue marker and keep unrelated labels intact.
+        client.call('d.custom1.set', torrent_hash, _without_smart_queue_label(current_label))
         return True
     except Exception:
         return False
```
```diff
@@ -346,8 +419,8 @@ def _cleanup_auto_labels(client: Any, profile_id: int, torrents: list[dict[str,
             if _restore_auto_label(client, profile_id, h, None if t is None else current_label):
                 restored.append(h)
                 continue
-            if current_label != SMART_QUEUE_LABEL:
-                _set_smart_queue_label(client, h)
+            if not _has_smart_queue_label(current_label):
+                _set_smart_queue_label(client, h, current_label)
 
     for h, t in by_hash.items():
         if not h or h in keep_hashes or h in tracked_hashes or _is_smart_queue_hold(t, manage_stopped):
```
```diff
@@ -363,7 +436,7 @@ def _is_running_download_slot(t: dict[str, Any]) -> bool:
     # Paused can have state=1/open=1, so a slot is counted only after d.is_active=1.
     if int(t.get('complete') or 0):
         return False
-    if str(t.get('label') or '') == SMART_QUEUE_LABEL:
+    if _has_smart_queue_label(str(t.get('label') or '')) or _has_stalled_label(str(t.get('label') or '')):
         return False
     status = str(t.get('status') or '').lower()
     if status == 'checking' or status == 'paused' or bool(t.get('paused')):
```
```diff
@@ -375,7 +448,9 @@ def _is_waiting_download_candidate(t: dict[str, Any], manage_stopped: bool) -> b
     """Return True for paused/held torrents Smart Queue may resume later."""
     if int(t.get('complete') or 0):
         return False
-    if str(t.get('label') or '') == SMART_QUEUE_LABEL:
+    if _has_stalled_label(str(t.get('label') or '')):
+        return False
+    if _has_smart_queue_label(str(t.get('label') or '')):
         return True
     # Note: Paused items are the primary source for filling the queue, regardless of manage_stopped.
     if bool(t.get('paused')) or str(t.get('status') or '').lower() == 'paused':
```
```diff
@@ -403,10 +478,12 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
         return {'ok': True, 'enabled': False, 'paused': [], 'resumed': [], 'labels_restored': restored, 'message': 'Smart Queue disabled'}
 
     torrents = rtorrent.list_torrents(profile)
-    excluded = _excluded_hashes(profile_id, user_id)
+    # Note: Torrents marked as Stalled are treated as queue-blocked even when there are no other pending downloads.
+    stalled_label_hashes = {str(t.get('hash') or '') for t in torrents if _has_stalled_label(str(t.get('label') or '')) and t.get('hash')}
+    excluded = _excluded_hashes(profile_id, user_id) | stalled_label_hashes
     manage_stopped = bool(settings.get('manage_stopped'))
     def is_managed_hold(t: dict[str, Any]) -> bool:
-        return str(t.get('label') or '') == SMART_QUEUE_LABEL
+        return _has_smart_queue_label(str(t.get('label') or ''))
 
     # Note: Count Smart Queue slots by d.is_active because Paused can have state=1/open=1 and must not occupy the limit.
     downloading = [
```
@@ -425,6 +502,7 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
     ]
     min_speed = int(settings.get('min_speed_bytes') or 0)
     min_seeds = int(settings.get('min_seeds') or 0)
+    min_peers = int(settings.get('min_peers') or 0)
    stalled_seconds = int(settings.get('stalled_seconds') or 300)
    now = utcnow()
    now_ts = datetime.now(timezone.utc).timestamp()
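The `or 0` coercion matters for settings rows created before the `min_peers` migration ran: `settings.get('min_peers')` can be missing or None there, and both collapse to 0, which the stalled check below treats as "peer threshold disabled":

```python
# Pre-migration settings row: no min_peers key at all.
settings = {"min_speed_bytes": 1024, "min_seeds": 1}
assert int(settings.get("min_peers") or 0) == 0

# NULL column value right after the ALTER TABLE migration.
settings["min_peers"] = None
assert int(settings.get("min_peers") or 0) == 0

# An explicitly configured threshold passes through unchanged.
settings["min_peers"] = 3
assert int(settings.get("min_peers") or 0) == 3
```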
@@ -432,7 +510,8 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
 
     with connect() as conn:
         for t in downloading:
-            is_stalled = int(t.get('down_rate') or 0) <= min_speed and int(t.get('seeds') or 0) <= min_seeds
+            # Note: Stalled detection requires low speed plus low seeds and, when configured, low peers.
+            is_stalled = int(t.get('down_rate') or 0) <= min_speed and int(t.get('seeds') or 0) <= min_seeds and (min_peers <= 0 or int(t.get('peers') or 0) <= min_peers)
             h = t.get('hash')
             if not h:
                 continue
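Restating the new predicate as a standalone function makes the guard visible: with `min_peers <= 0` the peer clause is vacuously true and the test degrades to the previous speed-plus-seeds rule, while a positive threshold keeps a well-connected torrent out of the stalled set even at low speed:

```python
def is_stalled(down_rate: int, seeds: int, peers: int,
               min_speed: int, min_seeds: int, min_peers: int) -> bool:
    # Same boolean structure as the inline expression in check().
    return (down_rate <= min_speed
            and seeds <= min_seeds
            and (min_peers <= 0 or peers <= min_peers))

# min_peers=0 disables the peer condition (previous behaviour):
assert is_stalled(500, 1, 40, min_speed=1024, min_seeds=1, min_peers=0)
# min_peers=5: forty connected peers keep the torrent out of the stalled set:
assert not is_stalled(500, 1, 40, min_speed=1024, min_seeds=1, min_peers=5)
```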
@@ -472,13 +551,12 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
     to_pause: list[dict[str, Any]] = pause_rank[:max(0, len(downloading) - max_active)]
     pause_hashes = {str(t.get('hash') or '') for t in to_pause}
 
-    # Note: Stalled rotation runs only when the queue is full. When slots are missing, Smart Queue should
-    # first add missing items instead of pausing existing or incorrectly detected stalled items.
-    if candidates and len(downloading) >= max_active:
-        replaceable_stalled = [t for t in stalled if str(t.get('hash') or '') not in pause_hashes]
-        for t in replaceable_stalled[:max(0, len(candidates) - len(to_pause))]:
-            to_pause.append(t)
-            pause_hashes.add(str(t.get('hash') or ''))
+    # Note: Confirmed stalled downloads are removed from the active queue immediately, then new candidates can fill those slots.
+    for t in stalled:
+        h = str(t.get('hash') or '')
+        if h and h not in pause_hashes:
+            to_pause.append(t)
+            pause_hashes.add(h)
 
     active_after_pause = max(0, len(downloading) - len(to_pause))
     available_slots = max(0, max_active - active_after_pause)
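The earlier behaviour only rotated stalled items when the queue was over the limit; now every confirmed-stalled hash is appended to `to_pause`, so a stalled torrent frees its slot even when the queue is exactly at `max_active`. A worked example using only the expressions visible above:

```python
max_active = 3
downloading = [{"hash": "a"}, {"hash": "b"}, {"hash": "c"}]  # at the limit
pause_rank: list[dict] = []                                   # nothing over the limit
to_pause = pause_rank[:max(0, len(downloading) - max_active)]  # -> [] before the change
pause_hashes = {str(t.get("hash") or "") for t in to_pause}

stalled = [{"hash": "b"}]  # "b" was confirmed stalled this pass
for t in stalled:
    h = str(t.get("hash") or "")
    if h and h not in pause_hashes:
        to_pause.append(t)
        pause_hashes.add(h)

active_after_pause = max(0, len(downloading) - len(to_pause))  # 3 - 1 = 2
available_slots = max(0, max_active - active_after_pause)      # one slot opens up
assert (active_after_pause, available_slots) == (2, 1)
```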
@@ -491,6 +569,7 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
     paused: list[str] = []
     resumed: list[str] = []
     label_failed: list[str] = []
+    stalled_labeled: list[str] = []
     start_failed: list[dict[str, str]] = []
     start_no_effect: list[dict[str, Any]] = []
     resume_requested: list[str] = []
@@ -498,12 +577,18 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
 
         for t in to_pause:
             try:
-                pause_result = rtorrent.pause_hash(c, t['hash'])
+                h = str(t.get('hash') or '')
+                pause_result = rtorrent.pause_hash(c, h)
                 if not pause_result.get('ok'):
                     raise RuntimeError(pause_result.get('error') or 'pause failed')
-                if not _mark_auto_paused(c, profile_id, t):
-                    label_failed.append(t['hash'])
-                paused.append(t['hash'])
+                if h in stalled_hashes:
+                    if _ensure_stalled_label(c, h, _read_label(c, h, str(t.get('label') or ''))):
+                        stalled_labeled.append(h)
+                    else:
+                        label_failed.append(h)
+                elif not _mark_auto_paused(c, profile_id, t):
+                    label_failed.append(h)
+                paused.append(h)
             except Exception:
                 pass
 
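`_read_label` and `_ensure_stalled_label` are not visible in this compare view; only their contracts can be read off the call sites: `_read_label` picks the label worth preserving (a previously recorded original over the live one), and `_ensure_stalled_label` returns truthy on success. Note also that `paused.append(h)` stays unconditional, so `stalled_labeled` annotates a subset of `paused` rather than forming a separate bucket. A toy re-run of just the branching, with stand-in helpers:

```python
stalled_hashes = {"b", "broken"}
paused, stalled_labeled, label_failed = [], [], []

def fake_ensure_stalled_label(h: str) -> bool:
    # Stand-in for _ensure_stalled_label; pretend labeling fails for "broken".
    return h != "broken"

for h in ["a", "b", "broken"]:
    if h in stalled_hashes:
        (stalled_labeled if fake_ensure_stalled_label(h) else label_failed).append(h)
    paused.append(h)  # every successful pause is reported, labeled or not

assert paused == ["a", "b", "broken"]
assert stalled_labeled == ["b"]
assert label_failed == ["broken"]
```

(The non-stalled `_mark_auto_paused` branch is omitted here; in the real loop it can also feed `label_failed`.)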
@@ -538,9 +623,9 @@ def check(profile: dict | None = None, user_id: int | None = None, force: bool =
         keep_labels = (
             set(paused)
             | {str(t.get('hash') or '') for t in to_label_waiting}
-            | {str(t.get('hash') or '') for t in stopped if str(t.get('label') or '') == SMART_QUEUE_LABEL and str(t.get('hash') or '') not in set(resumed)}
+            | {str(t.get('hash') or '') for t in stopped if _has_smart_queue_label(str(t.get('label') or '')) and str(t.get('hash') or '') not in set(resumed)}
         )
         restored = _cleanup_auto_labels(c, profile_id, torrents, keep_labels, manage_stopped)
-    details = {'excluded': len(excluded), 'enabled': bool(settings.get('enabled')), 'auto_label': SMART_QUEUE_LABEL, 'labels_restored': restored, 'labels_failed': label_failed, 'start_failed': start_failed, 'start_no_effect': start_no_effect, 'start_results': start_results, 'resume_requested': resume_requested, 'active_verified': active_verified, 'waiting_labeled': len(to_label_waiting), 'manage_stopped': manage_stopped, 'max_active_downloads': max_active, 'active_before': len(downloading), 'active_after_expected': active_after_pause + len(resumed), 'paused_planned': len(to_pause), 'resumed_planned': len(to_resume), 'rtorrent_cap': rtorrent_cap}
+    details = {'excluded': len(excluded), 'excluded_stalled': len(stalled_label_hashes), 'enabled': bool(settings.get('enabled')), 'auto_label': SMART_QUEUE_LABEL, 'stalled_label': SMART_QUEUE_STALLED_LABEL, 'stalled_labeled': stalled_labeled, 'labels_restored': restored, 'labels_failed': label_failed, 'start_failed': start_failed, 'start_no_effect': start_no_effect, 'start_results': start_results, 'resume_requested': resume_requested, 'active_verified': active_verified, 'waiting_labeled': len(to_label_waiting), 'manage_stopped': manage_stopped, 'max_active_downloads': max_active, 'active_before': len(downloading), 'active_after_expected': active_after_pause + len(resumed), 'paused_planned': len(to_pause), 'resumed_planned': len(to_resume), 'rtorrent_cap': rtorrent_cap}
     add_history(profile_id, 'force_check' if force else 'auto_check', paused, resumed, len(torrents), details, user_id)
-    return {'ok': True, 'enabled': bool(settings.get('enabled')), 'paused': paused, 'resumed': resumed, 'resume_requested': resume_requested, 'waiting_labeled': len(to_label_waiting), 'labels_restored': restored, 'labels_failed': label_failed, 'start_failed': start_failed, 'start_no_effect': start_no_effect, 'active_verified': active_verified, 'rtorrent_cap': rtorrent_cap, 'checked': len(torrents), 'excluded': len(excluded), 'settings': settings}
+    return {'ok': True, 'enabled': bool(settings.get('enabled')), 'paused': paused, 'resumed': resumed, 'resume_requested': resume_requested, 'waiting_labeled': len(to_label_waiting), 'stalled_labeled': stalled_labeled, 'excluded_stalled': len(stalled_label_hashes), 'labels_restored': restored, 'labels_failed': label_failed, 'start_failed': start_failed, 'start_no_effect': start_no_effect, 'active_verified': active_verified, 'rtorrent_cap': rtorrent_cap, 'checked': len(torrents), 'excluded': len(excluded), 'settings': settings}
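Downstream consumers see the stalled bookkeeping in two places: per-run in the returned dict and historically in the `details` payload recorded by `add_history`. For example, a caller distinguishing fresh stalls from carried-over ones (shape taken from the return statement above; `profile` here is a connection-profile dict as elsewhere in the module):

```python
result = check(profile, user_id=1, force=True)
if result["ok"]:
    print("newly labeled stalled:", result["stalled_labeled"])       # this pass
    print("already excluded as stalled:", result["excluded_stalled"])  # earlier passes
```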
@@ -54,25 +54,33 @@ def _job_row(job_id: str):
         return conn.execute("SELECT rowid AS _rowid, * FROM jobs WHERE id=?", (job_id,)).fetchone()
 
 
-def _is_ordered_action(action_name: str) -> bool:
-    return action_name in {"move", "remove"}
+def _job_payload(row) -> dict:
+    try:
+        return json.loads((row or {}).get("payload_json") or "{}")
+    except Exception:
+        return {}
+
+
+def _is_ordered_job(row) -> bool:
+    payload = _job_payload(row)
+    # Note: Move/remove remain ordered, and automation-created jobs can opt in so effect order is visible and predictable.
+    return str((row or {}).get("action") or "") in {"move", "remove"} or bool(payload.get("automation_ordered"))
 
 
 def _has_prior_ordered_jobs(profile_id: int, rowid: int) -> bool:
     with connect() as conn:
-        row = conn.execute(
+        rows = conn.execute(
             """
-            SELECT 1
+            SELECT rowid AS _rowid, action, payload_json
             FROM jobs
             WHERE profile_id=?
               AND rowid<?
-              AND action IN ('move', 'remove')
               AND status IN ('pending', 'running')
-            LIMIT 1
+            ORDER BY rowid
             """,
             (profile_id, rowid),
-        ).fetchone()
-    return bool(row)
+        ).fetchall()
+    return any(_is_ordered_job(row) for row in rows)
 
 
 def _wait_for_prior_ordered_jobs(job_id: str, profile_id: int, rowid: int) -> bool:
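The opt-in travels inside the job's JSON payload, so the automation engine can mark any job it enqueues as ordered without a schema change. Reusing `_job_payload` and `_is_ordered_job` exactly as added above (the `set_label` action name is only illustrative, not taken from this diff):

```python
import json

def _job_payload(row) -> dict:
    try:
        return json.loads((row or {}).get("payload_json") or "{}")
    except Exception:
        return {}

def _is_ordered_job(row) -> bool:
    payload = _job_payload(row)
    return str((row or {}).get("action") or "") in {"move", "remove"} or bool(payload.get("automation_ordered"))

assert _is_ordered_job({"action": "move", "payload_json": "{}"})      # ordered by action
assert _is_ordered_job({"action": "set_label",
                        "payload_json": json.dumps({"automation_ordered": True})})
assert not _is_ordered_job({"action": "set_label", "payload_json": "{}"})   # stays parallel
assert not _is_ordered_job({"action": "set_label", "payload_json": "not json"})  # bad payload -> {}
```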
@@ -140,7 +148,7 @@ def _run(job_id: str):
         return
     profile_id = int(profile["id"])
     ordered_lock = None
-    if _is_ordered_action(str(job["action"])):
+    if _is_ordered_job(job):
         if not _wait_for_prior_ordered_jobs(job_id, profile_id, int(job["_rowid"])):
             return
         ordered_lock = _get_exclusive_lock(profile_id)
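`_wait_for_prior_ordered_jobs` and `_get_exclusive_lock` are untouched by this change; the visible contract is that an ordered job first drains earlier ordered jobs on the same profile, then holds a per-profile exclusive lock while it runs. A minimal sketch of what such a lock registry could look like, assuming one `threading.Lock` per profile (the real implementation is not part of this diff):

```python
import threading

_profile_locks: dict[int, threading.Lock] = {}
_registry_guard = threading.Lock()

def get_exclusive_lock(profile_id: int) -> threading.Lock:
    # One lock per profile: ordered jobs serialize within a profile
    # while jobs on other profiles keep running in parallel.
    with _registry_guard:
        return _profile_locks.setdefault(profile_id, threading.Lock())
```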
File diff suppressed because one or more lines are too long
@@ -1238,36 +1238,60 @@ body.mobile-mode .mobile-card {
   color: var(--bs-primary-text-emphasis);
 }
 
-.automation-form-grid {
+.automation-shell {
+  display: grid;
+  gap: 0.75rem;
+}
+.automation-main-card {
+  padding: 0.75rem;
+  border: 1px solid var(--bs-border-color);
+  border-radius: 0.75rem;
+  background: var(--bs-body-bg);
+}
+.automation-card-title {
+  margin-bottom: 0.5rem;
+  font-weight: 700;
+}
+.automation-rule-grid,
+.automation-builder-grid {
   display: grid;
   grid-template-columns: repeat(4, minmax(160px, 1fr));
   gap: 0.5rem;
   align-items: center;
 }
-.auto-move-option {
-  gap: 0.45rem;
+.automation-enabled,
+.automation-negate {
   margin: 0;
-}
-
-.automation-builder-list {
-  display: grid;
-  grid-column: 1 / -1;
-  gap: 0.4rem;
-}
-
-.automation-chip {
-  display: flex;
-  align-items: center;
-  justify-content: space-between;
-  gap: 0.5rem;
-  padding: 0.4rem 0.55rem;
+  padding: 0.45rem 0.6rem 0.45rem 2.5rem;
   border: 1px solid var(--bs-border-color);
-  border-radius: 0.55rem;
-  background: var(--bs-secondary-bg);
+  border-radius: 0.5rem;
+}
+.automation-path-input {
+  grid-column: span 2;
+}
+.automation-chip-list {
+  display: flex;
+  flex-wrap: wrap;
+  gap: 0.45rem;
+}
+.automation-chip {
+  display: inline-flex;
+  align-items: center;
+  gap: 0.35rem;
+  max-width: 100%;
+  padding: 0.25rem 0.5rem;
+  border: 1px solid var(--bs-border-color);
+  border-radius: 999px;
+  background: var(--bs-tertiary-bg);
+  font-size: 0.82rem;
+}
+.automation-actions,
+.automation-row-actions {
+  display: flex;
+  flex-wrap: wrap;
+  gap: 0.4rem;
+  align-items: center;
 }
 
 .automation-row {
   display: flex;
   justify-content: space-between;
@@ -1279,10 +1303,109 @@ body.mobile-mode .mobile-card {
   margin-bottom: 0.45rem;
   background: var(--bs-body-bg);
 }
+.automation-row-main {
+  min-width: 0;
+}
+.automation-rule-summary {
+  overflow-wrap: anywhere;
+}
+.automation-action-pill {
+  display: inline-flex;
+  max-width: 100%;
+  margin: 0.1rem;
+  padding: 0.15rem 0.4rem;
+  border-radius: 999px;
+  background: var(--bs-secondary-bg);
+  font-size: 0.78rem;
+  overflow-wrap: anywhere;
+  white-space: normal;
+  word-break: break-word;
+}
+.automation-history-toolbar {
+  display: flex;
+  justify-content: flex-end;
+  margin-bottom: 0.5rem;
+}
+/* Note: Automation history has fixed compact metadata columns and a flexible Actions column, so long JSON cannot overlap Time/Rule. */
+.automation-history-table {
+  width: 100%;
+  table-layout: fixed;
+  white-space: normal;
+}
+.automation-history-table th,
+.automation-history-table td {
+  min-width: 0;
+  vertical-align: top;
+}
+.automation-history-table th:nth-child(1),
+.automation-history-table td:nth-child(1) {
+  width: 9rem;
+  overflow: hidden;
+  text-overflow: ellipsis;
+  white-space: nowrap;
+}
+.automation-history-table th:nth-child(2),
+.automation-history-table td:nth-child(2) {
+  width: 11rem;
+  overflow: hidden;
+  overflow-wrap: anywhere;
+  word-break: break-word;
+}
+.automation-history-table th:nth-child(3),
+.automation-history-table td:nth-child(3) {
+  width: 12rem;
+  overflow: hidden;
+  overflow-wrap: anywhere;
+  word-break: break-word;
+}
+.automation-history-table th:nth-child(4),
+.automation-history-table td:nth-child(4) {
+  width: auto;
+  overflow: hidden;
+  overflow-wrap: anywhere;
+  word-break: break-word;
+}
+.automation-history-details {
+  display: block;
+  min-width: 0;
+  max-width: 100%;
+}
+.automation-history-details summary {
+  display: block;
+  max-width: 100%;
+  cursor: pointer;
+  list-style-position: inside;
+  overflow-wrap: anywhere;
+  white-space: normal;
+  word-break: break-word;
+}
+.automation-history-details pre,
+.automation-history-raw {
+  max-width: 100%;
+  max-height: 220px;
+  margin: 0.35rem 0 0;
+  padding: 0.5rem;
+  overflow: auto;
+  border: 1px solid var(--bs-border-color);
+  border-radius: 0.5rem;
+  background: var(--bs-tertiary-bg);
+  overflow-wrap: anywhere;
+  white-space: pre-wrap;
+  word-break: break-word;
+}
 @media (max-width: 900px) {
-  .automation-form-grid {
+  .automation-rule-grid,
+  .automation-builder-grid {
     grid-template-columns: 1fr;
   }
+  .automation-path-input,
+  .automation-history-details {
+    grid-column: auto;
+    max-width: 100%;
+  }
+  .automation-history-toolbar {
+    justify-content: flex-start;
+  }
 }
 .disk-status {
   display: inline-flex;
File diff suppressed because one or more lines are too long