Automatically scan after download completion, then run an automatic incremental update for the active server

pull/49/head
Broque Thomas 8 months ago
parent 8cceef0610
commit e06f0ea372

@ -1121,6 +1121,25 @@ class DatabaseUpdateWorker(QThread):
logger.error(f"Error processing artist '{getattr(media_artist, 'title', 'Unknown')}': {e}")
return False, f"Processing error: {str(e)}", 0, 0
def run_with_callback(self, completion_callback=None):
    """
    Execute the database update, then invoke an optional completion callback.

    Used by the web interface to chain follow-up operations after the
    update finishes (e.g. an automatic incremental refresh).

    Args:
        completion_callback: Zero-argument callable invoked after the
            update completes. Exceptions raised by it are logged, never
            propagated.
    """
    try:
        # Run the normal (blocking) update pass first.
        self.run()
    except Exception as e:
        logger.error(f"Error in run_with_callback: {e}")
        return
    if completion_callback:
        try:
            completion_callback()
        except Exception as e:
            logger.error(f"Error in database update completion callback: {e}")
class DatabaseStatsWorker(QThread):
"""Simple worker for getting database statistics without blocking UI"""

@ -113,7 +113,7 @@ class PlexClient:
return
self._find_music_library()
logger.info(f"Successfully connected to Plex server: {self.server.friendlyName}")
logger.debug(f"Successfully connected to Plex server: {self.server.friendlyName}")
except Exception as e:
logger.error(f"Failed to connect to Plex server: {e}")
@ -143,12 +143,12 @@ class PlexClient:
for section in music_sections:
if section.title == priority_name:
self.music_library = section
logger.info(f"Found preferred music library: {section.title}")
logger.debug(f"Found preferred music library: {section.title}")
return
# If no priority match found, use the first one
self.music_library = music_sections[0]
logger.info(f"Found music library (first available): {self.music_library.title}")
logger.debug(f"Found music library (first available): {self.music_library.title}")
# Log other available libraries if multiple exist
if len(music_sections) > 1:

@ -0,0 +1,302 @@
#!/usr/bin/env python3
import threading
import time
from utils.logging_config import get_logger
logger = get_logger("web_scan_manager")
class WebScanManager:
    """
    Web-specific media library scan manager with debouncing and callback support.

    Designed for Flask web server integration with automatic post-download scanning.

    Features:
    - Debounces multiple scan requests to prevent spam
    - Thread-safe operation with Flask
    - Works with Plex, Jellyfin, and Navidrome
    - Scan completion callbacks for chained operations
    - Progress tracking and status reporting
    """

    def __init__(self, media_clients, delay_seconds: int = 60):
        """
        Initialize the web scan manager.

        Args:
            media_clients: Dict containing plex_client, jellyfin_client, navidrome_client
            delay_seconds: Debounce delay in seconds (default 60s)
        """
        self.delay = delay_seconds
        self.media_clients = media_clients
        self._timer = None  # pending debounce threading.Timer, or None
        self._scan_in_progress = False
        self._downloads_during_scan = False  # a scan request arrived mid-scan
        self._lock = threading.Lock()  # non-reentrant; never call request_scan() while held
        self._scan_completion_callbacks = []
        self._scan_start_time = None
        self._max_scan_time = 1800  # 30 minutes maximum before the scan is assumed done
        self._current_server_type = None
        self._scan_progress = {}
        logger.info(f"WebScanManager initialized with {delay_seconds}s debounce delay")

    def _get_active_media_client(self):
        """Get the active media client based on config settings.

        Returns:
            tuple: (client, server_name) for a connected client, or
            (None, None) when nothing usable is available.
        """
        try:
            from config.settings import config_manager
            active_server = config_manager.get_active_media_server()
            server_client_map = {
                'jellyfin': 'jellyfin_client',
                'navidrome': 'navidrome_client',
                'plex': 'plex_client'
            }
            # Try to get the configured active server first
            if active_server in server_client_map:
                client_key = server_client_map[active_server]
                client = self.media_clients.get(client_key)
                if client and hasattr(client, 'is_connected') and client.is_connected():
                    return client, active_server
                else:
                    logger.warning(f"{active_server.title()} client not connected, falling back to Plex")
            # Fallback to Plex
            plex_client = self.media_clients.get('plex_client')
            if plex_client and hasattr(plex_client, 'is_connected') and plex_client.is_connected():
                return plex_client, "plex"
            logger.error("No active media client available for scanning")
            return None, None
        except Exception as e:
            logger.error(f"Error determining active media server: {e}")
            return None, None

    def request_scan(self, reason: str = "Download completed", callback=None):
        """
        Request a library scan with smart debouncing logic.

        Args:
            reason: Optional reason for the scan request (for logging)
            callback: Optional callback function to call when scan completes

        Returns:
            dict: Scan request status and timing info
        """
        logger.info(f"Web scan requested - reason: {reason}")
        with self._lock:
            # Add callback if provided
            if callback and callback not in self._scan_completion_callbacks:
                self._scan_completion_callbacks.append(callback)
            if self._scan_in_progress:
                # Server is currently scanning - mark that we need another scan later
                self._downloads_during_scan = True
                logger.info(f"📡 Web scan in progress - queueing follow-up scan ({reason})")
                return {
                    "status": "queued",
                    "message": "Scan already in progress, queued for later",
                    "estimated_delay": "after current scan completes"
                }
            # Cancel any existing timer and start a new one
            if self._timer:
                self._timer.cancel()
                logger.debug(f"⏳ Resetting web scan timer ({reason})")
            else:
                logger.info(f"⏳ Web scan queued - will execute in {self.delay}s ({reason})")
            # Start the debounce timer
            self._timer = threading.Timer(self.delay, self._execute_scan)
            self._timer.start()
            return {
                "status": "scheduled",
                "message": f"Scan scheduled to start in {self.delay} seconds",
                "delay_seconds": self.delay,
                "reason": reason
            }

    def add_scan_completion_callback(self, callback):
        """
        Add a callback function to be called when scan completes.

        Args:
            callback: Function to call when scan completes
        """
        with self._lock:
            if callback not in self._scan_completion_callbacks:
                self._scan_completion_callbacks.append(callback)
                logger.info(f"Added web scan completion callback: {callback.__name__}")

    def remove_scan_completion_callback(self, callback):
        """Remove a previously registered callback."""
        with self._lock:
            if callback in self._scan_completion_callbacks:
                self._scan_completion_callbacks.remove(callback)
                logger.debug(f"Removed web scan completion callback: {callback.__name__}")

    def get_scan_status(self):
        """
        Get current scan status for web API responses.

        Returns:
            dict: Current scan status information
        """
        with self._lock:
            if self._scan_in_progress:
                elapsed_time = time.time() - self._scan_start_time if self._scan_start_time else 0
                return {
                    "status": "scanning",
                    "server_type": self._current_server_type,
                    "elapsed_seconds": int(elapsed_time),
                    "max_time_seconds": self._max_scan_time,
                    "progress": self._scan_progress.copy()
                }
            elif self._timer:
                return {
                    "status": "scheduled",
                    "server_type": None,
                    "delay_remaining": "unknown",
                    "progress": {}
                }
            else:
                return {
                    "status": "idle",
                    "server_type": None,
                    "progress": {}
                }

    def _execute_scan(self):
        """Execute the actual media library scan (runs on the debounce timer thread)."""
        with self._lock:
            if self._scan_in_progress:
                logger.warning("Web scan already in progress - skipping duplicate execution")
                return
            self._scan_in_progress = True
            self._downloads_during_scan = False
            self._timer = None
            self._scan_start_time = time.time()
            self._scan_progress = {"status": "starting", "message": "Initializing scan"}
        # Get the active media client (outside the lock - may do I/O)
        media_client, server_type = self._get_active_media_client()
        if not media_client:
            logger.error("❌ No active media client available for web library scan")
            self._reset_scan_state()
            return
        self._current_server_type = server_type
        logger.info(f"🎵 Starting {server_type.upper()} library scan via web interface...")
        try:
            # Update progress
            with self._lock:
                self._scan_progress = {
                    "status": "scanning",
                    "message": f"Triggering {server_type.upper()} library scan"
                }
            success = media_client.trigger_library_scan()
            if success:
                logger.info(f"{server_type.upper()} library scan initiated successfully via web")
                with self._lock:
                    self._scan_progress = {
                        "status": "active",
                        "message": f"{server_type.upper()} is scanning library"
                    }
                # Start periodic completion checking
                self._start_periodic_completion_check()
            else:
                logger.error(f"❌ Failed to initiate {server_type.upper()} library scan via web")
                with self._lock:
                    self._scan_progress = {
                        "status": "failed",
                        "message": f"Failed to start {server_type.upper()} scan"
                    }
                self._reset_scan_state()
        except Exception as e:
            logger.error(f"❌ Error during {server_type.upper()} library scan via web: {e}")
            with self._lock:
                self._scan_progress = {
                    "status": "error",
                    "message": f"Scan error: {str(e)}"
                }
            self._reset_scan_state()

    def _start_periodic_completion_check(self):
        """Start periodic checking for scan completion.

        There is no reliable cross-server completion signal, so completion
        is assumed after 5 minutes (or _max_scan_time as a hard timeout).
        """
        def check_completion():
            try:
                # Check for timeout
                if self._scan_start_time and (time.time() - self._scan_start_time) > self._max_scan_time:
                    logger.warning(f"Web scan timed out after {self._max_scan_time} seconds")
                    with self._lock:
                        self._scan_progress = {
                            "status": "timeout",
                            "message": "Scan timed out - assuming complete"
                        }
                    self._handle_scan_completion()
                    return
                # Use simple time-based completion (5 minutes)
                elapsed_time = time.time() - self._scan_start_time if self._scan_start_time else 0
                if elapsed_time >= 300:  # 5 minutes
                    logger.info(f"Web scan completion assumed after {elapsed_time:.0f} seconds")
                    with self._lock:
                        self._scan_progress = {
                            "status": "completed",
                            "message": "Scan completed successfully"
                        }
                    self._handle_scan_completion()
                else:
                    # Continue checking
                    threading.Timer(30, check_completion).start()  # Check every 30 seconds
            except Exception as e:
                logger.error(f"Error during web scan completion check: {e}")
                self._reset_scan_state()
        # Start first check after 30 seconds
        threading.Timer(30, check_completion).start()

    def _handle_scan_completion(self):
        """Handle scan completion and trigger callbacks."""
        logger.info(f"🏁 Web {self._current_server_type.upper()} library scan completed")
        # Snapshot callbacks under the lock, invoke them outside it
        with self._lock:
            callbacks_to_call = self._scan_completion_callbacks.copy()
        for callback in callbacks_to_call:
            try:
                logger.info(f"🔄 Calling web scan completion callback: {callback.__name__}")
                callback()
            except Exception as e:
                logger.error(f"Error in web scan completion callback {callback.__name__}: {e}")
        # Reset scan state
        self._reset_scan_state()
        # Check if we need another scan due to downloads during this scan.
        # BUG FIX: read the flag under the lock but call request_scan() only
        # AFTER releasing it - request_scan() re-acquires self._lock, and
        # threading.Lock is not reentrant, so calling it while holding the
        # lock deadlocked the completion-check timer thread.
        with self._lock:
            follow_up_needed = self._downloads_during_scan
        if follow_up_needed:
            logger.info("🔄 Web scan follow-up needed for downloads during scan")
            self.request_scan("Follow-up scan for downloads during previous scan")

    def _reset_scan_state(self):
        """Reset internal scan state."""
        with self._lock:
            self._scan_in_progress = False
            self._current_server_type = None
            self._scan_start_time = None
            self._scan_progress = {}
            # Don't clear callbacks - they might be reused

@ -32,6 +32,7 @@ from core.soulseek_client import SoulseekClient
from core.tidal_client import TidalClient # Added import for Tidal
from core.matching_engine import MusicMatchingEngine
from core.database_update_worker import DatabaseUpdateWorker, DatabaseStatsWorker
from core.web_scan_manager import WebScanManager
from database.music_database import get_database
from services.sync_service import PlaylistSyncService
from datetime import datetime
@ -105,10 +106,18 @@ try:
tidal_client = TidalClient()
matching_engine = MusicMatchingEngine()
sync_service = PlaylistSyncService(spotify_client, plex_client, soulseek_client, jellyfin_client, navidrome_client)
print("✅ Core service clients initialized.")
# Initialize web scan manager for automatic post-download scanning
media_clients = {
'plex_client': plex_client,
'jellyfin_client': jellyfin_client,
'navidrome_client': navidrome_client
}
web_scan_manager = WebScanManager(media_clients, delay_seconds=60)
print("✅ Core service clients and scan manager initialized.")
except Exception as e:
print(f"🔴 FATAL: Error initializing service clients: {e}")
spotify_client = plex_client = jellyfin_client = navidrome_client = soulseek_client = tidal_client = matching_engine = sync_service = None
spotify_client = plex_client = jellyfin_client = navidrome_client = soulseek_client = tidal_client = matching_engine = sync_service = web_scan_manager = None
# --- Global Streaming State Management ---
# Thread-safe state tracking for streaming functionality
@ -2305,6 +2314,243 @@ def clear_finished_downloads():
print(f"Error clearing finished downloads: {e}")
return jsonify({"success": False, "error": str(e)}), 500
@app.route('/api/scan/request', methods=['POST'])
def request_media_scan():
    """
    Request a media library scan with automatic completion callback support.

    JSON body (all optional):
        reason (str): human-readable reason for the scan request.
        auto_database_update (bool): when true (default), an incremental
            database update is launched automatically once the scan ends.
    """
    try:
        if not web_scan_manager:
            return jsonify({"success": False, "error": "Scan manager not initialized"}), 500
        payload = request.get_json() or {}
        reason = payload.get('reason', 'Web UI download completed')
        auto_database_update = payload.get('auto_database_update', True)

        def scan_completion_callback():
            """Callback to trigger automatic database update after scan completes"""
            if not auto_database_update:
                return
            try:
                logger.info("🔄 Starting automatic incremental database update after scan completion")
                # Run the update on a daemon thread so the scan thread isn't blocked
                threading.Thread(
                    target=trigger_automatic_database_update,
                    args=("Post-scan automatic update",),
                    daemon=True,
                ).start()
            except Exception as e:
                logger.error(f"Error starting automatic database update: {e}")

        # Only register the callback when a follow-up update is wanted
        chained_callback = scan_completion_callback if auto_database_update else None
        result = web_scan_manager.request_scan(reason=reason, callback=chained_callback)
        add_activity_item("📡", "Media Scan", f"Scan requested: {reason}", "Now")
        return jsonify({
            "success": True,
            "scan_info": result,
            "auto_database_update": auto_database_update,
        })
    except Exception as e:
        logger.error(f"Error requesting media scan: {e}")
        return jsonify({"success": False, "error": str(e)}), 500
@app.route('/api/scan/status', methods=['GET'])
def get_scan_status():
    """
    Get current media scan status.

    Returns the scan manager's status dict (idle / scheduled / scanning).
    """
    try:
        if not web_scan_manager:
            return jsonify({"success": False, "error": "Scan manager not initialized"}), 500
        return jsonify({"success": True, "status": web_scan_manager.get_scan_status()})
    except Exception as e:
        logger.error(f"Error getting scan status: {e}")
        return jsonify({"success": False, "error": str(e)}), 500
@app.route('/api/database/incremental-update', methods=['POST'])
def request_incremental_database_update():
    """
    Request an incremental database update with prerequisites checking.

    Prerequisites (mirrors the GUI logic):
    - at least 100 tracks already in the database
    - a previous full refresh recorded in system_info

    Returns 400 when prerequisites are not met, 500 when the update
    cannot be started (e.g. no connected media client).
    """
    try:
        data = request.get_json() or {}
        reason = data.get('reason', 'Web UI manual request')
        # Check prerequisites (similar to GUI logic)
        db = get_database()
        # Check if database has enough content for incremental updates
        track_count = db.execute("SELECT COUNT(*) FROM tracks").fetchone()[0]
        if track_count < 100:
            return jsonify({
                "success": False,
                "error": f"Database has only {track_count} tracks - insufficient for incremental updates (minimum 100)",
                "track_count": track_count
            }), 400
        # Check if there's been a previous full refresh
        last_refresh = db.execute(
            "SELECT value FROM system_info WHERE key = 'last_full_refresh'"
        ).fetchone()
        if not last_refresh:
            return jsonify({
                "success": False,
                "error": "No previous full refresh found - incremental updates require established database",
                "suggestion": "Run a full refresh first"
            }), 400
        # Start incremental update.
        # BUG FIX: the helper's boolean return value was previously ignored,
        # so a failed start was reported to the client as success.
        if not trigger_automatic_database_update(reason):
            return jsonify({
                "success": False,
                "error": "Failed to start incremental database update"
            }), 500
        add_activity_item("🔄", "Database Update", f"Incremental update started: {reason}", "Now")
        return jsonify({
            "success": True,
            "message": "Incremental database update started",
            "track_count": track_count,
            "last_refresh": last_refresh[0] if last_refresh else None,
            "reason": reason
        })
    except Exception as e:
        logger.error(f"Error requesting incremental database update: {e}")
        return jsonify({"success": False, "error": str(e)}), 500
def trigger_automatic_database_update(reason="Automatic update"):
    """
    Helper function to trigger automatic incremental database update.

    Resolves the active media server's client (falling back to Plex) and
    runs a DatabaseUpdateWorker incremental pass on a background thread.

    Args:
        reason: Human-readable reason, used for logging only.

    Returns:
        bool: True when the background update was started, False otherwise.
    """
    try:
        from config.settings import config_manager
        active_server = config_manager.get_active_media_server()
        # Resolve the client for the active server, falling back to Plex.
        # BUG FIX: track the server name together with the chosen client -
        # previously the worker could be handed server_type='jellyfin' (or
        # 'navidrome') while actually receiving the fallback Plex client,
        # and log messages named the wrong server.
        if active_server == "jellyfin" and jellyfin_client:
            media_client, server_type = jellyfin_client, "jellyfin"
        elif active_server == "navidrome" and navidrome_client:
            media_client, server_type = navidrome_client, "navidrome"
        else:
            media_client, server_type = plex_client, "plex"  # Default fallback
        if not media_client or not media_client.is_connected():
            logger.error(f"No connected {server_type} client for automatic database update")
            return False
        # Create and start database update worker
        worker = DatabaseUpdateWorker(
            media_client=media_client,
            server_type=server_type,
            full_refresh=False  # Always incremental for automatic updates
        )

        def update_completion_callback():
            logger.info(f"✅ Automatic incremental database update completed for {server_type}")
            # NOTE(review): icon argument is empty while other calls pass an
            # emoji - confirm whether this was intentional.
            add_activity_item("", "Database Update", f"Automatic update completed ({server_type})", "Now")

        # Start update in background thread
        update_thread = threading.Thread(
            target=lambda: worker.run_with_callback(update_completion_callback),
            daemon=True
        )
        update_thread.start()
        logger.info(f"🔄 Started automatic incremental database update for {server_type}")
        return True
    except Exception as e:
        logger.error(f"Error in automatic database update: {e}")
        return False
@app.route('/api/test/automation', methods=['POST'])
def test_automation_workflow():
    """
    Test endpoint to verify the automatic workflow functionality.

    Runs a series of checks (scan manager availability, database
    prerequisites, media client connections, and optionally a real scan
    request) and reports whether the automation chain is ready.

    JSON body (optional):
        test_type (str): 'full' (default) also issues a real scan request.
    """
    try:
        # BUG FIX: config_manager was referenced below without an import
        # visible in this scope; sibling helpers import it locally from
        # config.settings, so do the same here (harmless if a module-level
        # import also exists).
        from config.settings import config_manager

        data = request.get_json() or {}
        test_type = data.get('test_type', 'full')
        results = {}
        # Test 1: Scan manager status
        if web_scan_manager:
            scan_status = web_scan_manager.get_scan_status()
            results['scan_manager'] = {'status': 'available', 'current_status': scan_status}
        else:
            results['scan_manager'] = {'status': 'unavailable'}
        # Test 2: Database prerequisites
        try:
            db = get_database()
            track_count = db.execute("SELECT COUNT(*) FROM tracks").fetchone()[0]
            last_refresh = db.execute(
                "SELECT value FROM system_info WHERE key = 'last_full_refresh'"
            ).fetchone()
            results['database'] = {
                'track_count': track_count,
                'meets_minimum': track_count >= 100,
                'has_previous_refresh': last_refresh is not None,
                'last_refresh': last_refresh[0] if last_refresh else None
            }
        except Exception as e:
            results['database'] = {'error': str(e)}
        # Test 3: Media client connections
        active_server = config_manager.get_active_media_server()
        results['media_clients'] = {'active_server': active_server}
        for client_name, client in [
            ('plex', plex_client),
            ('jellyfin', jellyfin_client),
            ('navidrome', navidrome_client)
        ]:
            try:
                is_connected = client.is_connected() if client else False
                results['media_clients'][client_name] = {
                    'available': client is not None,
                    'connected': is_connected
                }
            except Exception as e:
                results['media_clients'][client_name] = {
                    'available': client is not None,
                    'connected': False,
                    'error': str(e)
                }
        # Test 4: If requested, actually test the scan request
        if test_type == 'full' and web_scan_manager:
            try:
                scan_result = web_scan_manager.request_scan(
                    reason="Automation test",
                    callback=None
                )
                results['scan_test'] = {'success': True, 'result': scan_result}
            except Exception as e:
                results['scan_test'] = {'success': False, 'error': str(e)}
        return jsonify({
            "success": True,
            "test_results": results,
            "automation_ready": (
                results.get('scan_manager', {}).get('status') == 'available' and
                results.get('database', {}).get('meets_minimum', False) and
                results.get('database', {}).get('has_previous_refresh', False)
            )
        })
    except Exception as e:
        logger.error(f"Error in automation test: {e}")
        return jsonify({"success": False, "error": str(e)}), 500
@app.route('/api/searches/clear-all', methods=['POST'])
def clear_all_searches():
"""

@ -3361,7 +3361,10 @@ async function closeDownloadMissingModal(playlistId) {
cleanupArtistDownload(playlistId);
console.log(`✅ [MODAL CLOSE] Artist download cleanup completed for: ${playlistId}`);
}
// Automatic cleanup and server operations after successful downloads
await handlePostDownloadAutomation(playlistId, process);
cleanupDownloadProcess(playlistId);
}
}
@ -5975,6 +5978,116 @@ window.matchedDownloadTrack = matchedDownloadTrack;
window.matchedDownloadAlbum = matchedDownloadAlbum;
window.matchedDownloadAlbumTrack = matchedDownloadAlbumTrack;
/**
 * Handle automatic post-download operations: cleanup → scan → database update.
 * This replicates the GUI's automatic functionality after download modal completion.
 * Steps 1 and 2 run here; step 3 (database update) is chained server-side by
 * the scan completion callback when auto_database_update is true.
 *
 * @param {string} playlistId - identifier of the playlist/process whose modal closed
 * @param {Object} process - download process object (inspected for the success count)
 */
async function handlePostDownloadAutomation(playlistId, process) {
    try {
        // Check if we have successful downloads that warrant automation
        const successfulDownloads = getSuccessfulDownloadCount(process);
        if (successfulDownloads === 0) {
            console.log(`🔄 [AUTO] No successful downloads for ${playlistId} - skipping automation`);
            return;
        }
        console.log(`🔄 [AUTO] Starting automatic post-download operations for ${playlistId} (${successfulDownloads} successful downloads)`);
        // Step 1: Clear completed downloads from slskd
        console.log(`🗑️ [AUTO] Step 1: Clearing completed downloads...`);
        showToast('🗑️ Clearing completed downloads...', 'info', 3000);
        try {
            const clearResponse = await fetch('/api/downloads/clear-finished', {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' }
            });
            if (clearResponse.ok) {
                console.log(`✅ [AUTO] Step 1 complete: Downloads cleared`);
            } else {
                // Non-fatal: a failed cleanup should not block the scan
                console.warn(`⚠️ [AUTO] Step 1 warning: Clear downloads failed, continuing anyway`);
            }
        } catch (error) {
            console.warn(`⚠️ [AUTO] Step 1 error: ${error.message}, continuing anyway`);
        }
        // Step 2: Request media server scan
        console.log(`📡 [AUTO] Step 2: Requesting media server scan...`);
        showToast('📡 Scanning media server library...', 'info', 5000);
        try {
            const scanResponse = await fetch('/api/scan/request', {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify({
                    reason: `Download modal completed for ${playlistId} (${successfulDownloads} tracks)`,
                    auto_database_update: true // This will trigger step 3 automatically after scan completes
                })
            });
            const scanResult = await scanResponse.json();
            if (scanResponse.ok && scanResult.success) {
                console.log(`✅ [AUTO] Step 2 complete: Media scan requested`);
                console.log(`🔄 [AUTO] Scan info:`, scanResult.scan_info);
                // Show success toast with scan details
                if (scanResult.scan_info.status === 'scheduled') {
                    showToast(`📡 Media scan scheduled (${scanResult.scan_info.delay_seconds}s delay)`, 'success', 5000);
                } else {
                    showToast('📡 Media scan requested successfully', 'success', 3000);
                }
                // Database update will be triggered automatically by the scan completion callback
                if (scanResult.auto_database_update) {
                    console.log(`🔄 [AUTO] Step 3 will run automatically after scan completes`);
                    showToast('🔄 Database update will follow automatically', 'info', 3000);
                }
            } else {
                console.error(`❌ [AUTO] Step 2 failed: ${scanResult.error || 'Unknown scan error'}`);
                showToast('❌ Media scan failed', 'error', 5000);
            }
        } catch (error) {
            console.error(`❌ [AUTO] Step 2 error: ${error.message}`);
            showToast('❌ Media scan request failed', 'error', 5000);
        }
        console.log(`🏁 [AUTO] Automatic post-download operations initiated for ${playlistId}`);
    } catch (error) {
        console.error(`❌ [AUTO] Error in post-download automation: ${error.message}`);
        showToast('❌ Automatic operations failed', 'error', 5000);
    }
}
/**
 * Extract successful download count from a download process.
 * Prefers the "downloaded" stat element inside the process modal; falls back
 * to a conservative guess of 1 when the process finished without a modal stat.
 */
function getSuccessfulDownloadCount(process) {
    try {
        // For processes that have completed, check the modal for completed count
        const statEl = (process && process.modalElement)
            ? process.modalElement.querySelector('[id*="stat-downloaded-"]')
            : null;
        if (statEl && statEl.textContent) {
            const parsed = parseInt(statEl.textContent, 10);
            return Number.isNaN(parsed) ? 0 : parsed;
        }
        // Fallback: assume one success if the process completed without obvious failure
        return (process && process.status === 'complete') ? 1 : 0;
    } catch (error) {
        console.warn(`⚠️ [AUTO] Error getting successful download count: ${error.message}`);
        return 0;
    }
}
// Download Missing Tracks Modal functions
window.openDownloadMissingModal = openDownloadMissingModal;
window.closeDownloadMissingModal = closeDownloadMissingModal;

Loading…
Cancel
Save