Clean up 286 ruff lint errors to unblock CI and fix 10 latent bugs

PR #340 added ruff to the build-and-test.yml CI gate, which surfaced
286 pre-existing lint errors. Left unfixed, they make every
feature-branch push fail CI. This commit resolves all of them so CI
goes green and contributors can actually land work.

Auto-fixes (248 of 286): removed extraneous f-string prefixes from
strings with no placeholders (F541), prefixed unused loop-control
variables with an underscore (B007), and removed duplicate imports
(F811).

Manually fixed 10 latent bugs that ruff caught (all currently wrapped
in try/except and failing silently):

- music_database.py: _add_discovery_tables() called conn.commit() with
  no conn in scope — it would have crashed the iTunes-support migration
  for existing databases. Now uses cursor.connection.commit().
- web_server.py settings GET: referenced the undefined name
  download_orchestrator where it should have used soulseek_client. The
  _source_status field on the settings payload was silently missing,
  breaking the UI auto-disable logic.
- web_server.py _process_wishlist_automatically: active_server
  undefined in track-ownership check. Auto-wishlist was falling
  through to the error handler and re-downloading owned tracks.
- web_server.py start_wishlist_missing_downloads: same active_server
  bug in the manual wishlist path.
- web_server.py _process_failed_tracks_to_wishlist_exact: emitted the
  wishlist_item_added automation event with undefined artist_name and
  track, so the event never fired correctly and the failure was silent.
- web_server.py discovery metadata enrichment: referenced cache
  without calling get_metadata_cache() first. Track enrichment from
  cached API responses was silently skipped.
- web_server.py Beatport discovery worker: the wing-it fallback branch
  used the undefined variable successful_discoveries, so the wing-it
  counter never incremented. Now uses state['spotify_matches']
  consistently with the rest of the function.
- web_server.py _run_full_missing_tracks_process: a stale import json
  mid-function shadowed the module-level import, making an earlier
  json.dumps() call reference an unbound local (F823; see the sketch
  after this list).
- web_server.py discovery loop: platform loop variable shadowed
  the module-level platform import (F402).
- core/watchlist_scanner.py: 7 lambda captures of loop variables (B023,
  the classic Python closure-in-loop bug) now bind at creation via
  default arguments; see the sketch below.
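A minimal sketch of the B023 fix pattern, with illustrative names
rather than the codebase's: a lambda created in a loop closes over the
loop variable itself, so every callback sees its final value, while a
default argument snapshots the value at creation time. That is the
binding the watchlist_scanner hunks below add via source=source and
artist=artist.

    sources = ['spotify', 'itunes', 'deezer']

    # Buggy (B023): all three lambdas share the single 'source' cell,
    # so each one sees the final value once the loop has finished.
    callbacks = [lambda: source for source in sources]
    print([cb() for cb in callbacks])  # ['deezer', 'deezer', 'deezer']

    # Fixed: a default argument binds the current value at creation.
    callbacks = [lambda source=source: source for source in sources]
    print([cb() for cb in callbacks])  # ['spotify', 'itunes', 'deezer']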

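And the F823 failure mode, as a minimal hypothetical sketch (not the
real web_server.py code path): a mid-function import makes the name
local to the entire function, so any use above the import raises
UnboundLocalError even though the module-level import exists.

    import json

    def build_payload(data):
        # UnboundLocalError at runtime: the 'import json' below makes
        # 'json' a local name for this whole function, shadowing the
        # module-level import (this early use is what F823 flags).
        text = json.dumps(data)
        import json  # stale mid-function import
        return text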
No existing tests had to change. Full suite stays at 263 passed.
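Several hunks below also add exception chaining when re-raising inside
an except block (raise ... from exc). A minimal sketch of why, written
as a hypothetical wrapper mirroring the yt_dlp guard in the diff:

    def require_yt_dlp():
        try:
            import yt_dlp
        except ImportError as exc:
            # 'from exc' records the original error as __cause__, so
            # the traceback reads "The above exception was the direct
            # cause of the following exception" instead of the
            # misleading "During handling of the above exception,
            # another exception occurred".
            raise ImportError("yt-dlp is required. Install with: pip install yt-dlp") from exc
        return yt_dlp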

@ -1666,7 +1666,7 @@ class BeatportUnifiedScraper:
release_urls = []
urls_found = 0
-for i, row in enumerate(table_rows):
+for _i, row in enumerate(table_rows):
# Look for release link in this row
link_elem = row.select_one('a[href*="/release/"]')
if link_elem and link_elem.get('href'):
@ -1694,7 +1694,7 @@ class BeatportUnifiedScraper:
_beatport_log(f" Found {len(tracks)} individual tracks")
all_individual_tracks.extend(tracks)
else:
_beatport_log(f" No tracks found")
_beatport_log(" No tracks found")
# Add delay between requests to be respectful
if i < len(release_urls) - 1:
@ -1877,7 +1877,7 @@ class BeatportUnifiedScraper:
# Convert to our standard format (with Hype Picks branding)
converted_tracks = []
-for i, track_data in enumerate(release_tracks):
+for _i, track_data in enumerate(release_tracks):
track = self.convert_hype_picks_json_to_track_format(track_data, release_url, len(converted_tracks) + 1)
if track:
converted_tracks.append(track)
@ -2631,7 +2631,7 @@ class BeatportUnifiedScraper:
# Example: "Gods window, Pt. 1Thakzin,Thandazo,Xelimpilo"
lines = [line.strip() for line in text.split('\n') if line.strip()]
-for i, line in enumerate(lines):
+for _i, line in enumerate(lines):
# Look for lines that might contain title and artists
if len(line) > 5 and '$' not in line and 'Music' in line:
# This might be a title line
@ -2861,7 +2861,7 @@ class BeatportUnifiedScraper:
_beatport_log(f" Found {len(tracks)} individual tracks")
all_individual_tracks.extend(tracks)
else:
_beatport_log(f" No tracks found")
_beatport_log(" No tracks found")
# Add delay between requests to be respectful
if i < len(release_urls) - 1:
@ -2983,7 +2983,7 @@ class BeatportUnifiedScraper:
# If no dedicated hype page found, try main genre page for hype content
if not tracks:
_beatport_log(f" No dedicated hype page found, looking for hype content on main page...")
_beatport_log(" No dedicated hype page found, looking for hype content on main page...")
genre_url = f"{self.base_url}/genre/{genre['slug']}/{genre['id']}"
soup = self.get_page(genre_url)
if soup:
@ -3048,7 +3048,7 @@ class BeatportUnifiedScraper:
seen_urls = set()
# Process ALL links but stop when we reach the limit of unique URLs (same as Latest Releases)
-for i, link in enumerate(release_links):
+for _i, link in enumerate(release_links):
href = link.get('href')
if href:
# Ensure full URL (same as Latest Releases)
@ -3228,7 +3228,7 @@ class BeatportUnifiedScraper:
if not soup:
return tracks
_beatport_log(f" Looking for HYPE labeled tracks on page...")
_beatport_log(" Looking for HYPE labeled tracks on page...")
# Look for elements containing "HYPE" text
hype_elements = soup.find_all(text=re.compile(r'HYPE', re.I))
@ -3243,7 +3243,7 @@ class BeatportUnifiedScraper:
track_container = None
# Walk up the DOM tree to find a suitable container
-for level in range(5):
+for _level in range(5):
if parent:
# Look for track links in this container
track_links = parent.find_all('a', href=re.compile(r'/track/'))
@ -3314,7 +3314,7 @@ class BeatportUnifiedScraper:
if not soup:
return tracks
_beatport_log(f" Extracting hype tracks from Beatport page...")
_beatport_log(" Extracting hype tracks from Beatport page...")
# Method 1: Extract from Hype Picks carousel (release cards with HYPE badges)
hype_picks_tracks = self.extract_hype_picks_from_carousel(soup, list_name, limit)
@ -3551,7 +3551,7 @@ class BeatportUnifiedScraper:
seen_urls = set()
# Process ALL links but stop when we reach the limit of unique URLs (same as Latest Releases)
-for i, link in enumerate(release_links):
+for _i, link in enumerate(release_links):
href = link.get('href')
if href:
# Ensure full URL (same as Latest Releases)
@ -3627,7 +3627,7 @@ class BeatportUnifiedScraper:
seen_urls = set()
# Process ALL links but stop when we reach the limit of unique URLs (same as homepage)
-for i, link in enumerate(release_links):
+for _i, link in enumerate(release_links):
href = link.get('href')
if href:
# Ensure full URL (same as homepage)
@ -3724,12 +3724,12 @@ class BeatportUnifiedScraper:
"""Extract tracks from Beatport chart table structure (tracks-table class)"""
tracks = []
_beatport_log(f" DEBUG: Looking for tracks-table container...")
_beatport_log(" DEBUG: Looking for tracks-table container...")
# Look for the tracks table container
tracks_table = soup.find(class_=re.compile(r'tracks-table'))
if not tracks_table:
_beatport_log(f" No tracks-table container found")
_beatport_log(" No tracks-table container found")
# Debug: Let's see what table classes ARE available
all_tables = soup.find_all(['table', 'div'], class_=re.compile(r'table|Table', re.I))
_beatport_log(f" DEBUG: Found {len(all_tables)} table-like elements")
@ -3745,7 +3745,7 @@ class BeatportUnifiedScraper:
track_rows_class = tracks_table.find_all(class_=re.compile(r'Table.*Row.*tracks-table'))
track_rows_generic = tracks_table.find_all(class_=re.compile(r'Table.*Row'))
_beatport_log(f" DEBUG: Track rows found:")
_beatport_log(" DEBUG: Track rows found:")
_beatport_log(f" - By data-testid='tracks-table-row': {len(track_rows_testid)}")
_beatport_log(f" - By class pattern 'Table.*Row.*tracks-table': {len(track_rows_class)}")
_beatport_log(f" - By generic 'Table.*Row': {len(track_rows_generic)}")
@ -3754,7 +3754,7 @@ class BeatportUnifiedScraper:
track_rows = track_rows_testid or track_rows_class or track_rows_generic
if not track_rows:
_beatport_log(f" No track rows found in any format")
_beatport_log(" No track rows found in any format")
return tracks
_beatport_log(f" Using {len(track_rows)} track rows for extraction")
@ -3836,7 +3836,7 @@ class BeatportUnifiedScraper:
_beatport_log(f" Found {len(table_rows)} potential table rows")
-for i, row in enumerate(table_rows[:limit]):
+for _i, row in enumerate(table_rows[:limit]):
try:
# Skip header rows
if row.name == 'tr' and row.find('th'):
@ -4361,7 +4361,7 @@ def test_dynamic_genre_discovery():
_beatport_log("\nTEST 2: Genre Discovery with Images (Sample)")
genres_with_images = scraper.discover_genres_with_images(include_images=True)
_beatport_log(f"\nSample genres with images:")
_beatport_log("\nSample genres with images:")
for genre in genres_with_images[:3]:
_beatport_log(f"{genre['name']}: {genre.get('image_url', 'No image')}")
@ -4377,7 +4377,7 @@ def test_dynamic_genre_discovery():
for track in tracks:
_beatport_log(f"{track['artist']} - {track['title']}")
else:
_beatport_log(f" No tracks found")
_beatport_log(" No tracks found")
return genres
@ -4392,7 +4392,7 @@ def test_improved_chart_sections():
_beatport_log("\nTEST 1: Chart Section Discovery")
chart_discovery = scraper.discover_chart_sections()
_beatport_log(f"\nDiscovery Results:")
_beatport_log("\nDiscovery Results:")
summary = chart_discovery.get('summary', {})
_beatport_log(f" • Top Charts sections: {summary.get('top_charts_sections', 0)}")
_beatport_log(f" • Staff Picks sections: {summary.get('staff_picks_sections', 0)}")
@ -4497,7 +4497,7 @@ def main():
top_100 = scraper.scrape_top_100(limit=10) # Test with 10 for now
if top_100:
_beatport_log(f"\nTop 100 Sample (showing first 5):")
_beatport_log("\nTop 100 Sample (showing first 5):")
for track in top_100[:5]:
_beatport_log(f" {track['position']}. {track['artist']} - {track['title']}")
@ -4548,7 +4548,7 @@ def main():
all_tracks = (top_100 or []) + [track for tracks in all_genre_results.values() for track in tracks]
if all_tracks:
overall_quality = scraper.test_data_quality(all_tracks)
_beatport_log(f"\nOVERALL DATA QUALITY")
_beatport_log("\nOVERALL DATA QUALITY")
_beatport_log(f"• Quality Score: {overall_quality['quality_score']:.1f}%")
_beatport_log(f"• Valid Tracks: {overall_quality['valid_tracks']}/{overall_quality['total_tracks']}")
@ -4571,27 +4571,27 @@ def main():
try:
with open('beatport_unified_results.json', 'w', encoding='utf-8') as f:
json.dump(results, f, indent=2, ensure_ascii=False)
_beatport_log(f"\nResults saved to beatport_unified_results.json")
_beatport_log("\nResults saved to beatport_unified_results.json")
except Exception as e:
_beatport_log(f"Failed to save results: {e}")
# Virtual playlist possibilities
if overall_quality['quality_score'] > 70:
_beatport_log(f"\nSUCCESS! Ready for virtual playlist creation")
_beatport_log(f"You can now create playlists for:")
_beatport_log(f" • Beatport Top 100")
_beatport_log("\nSUCCESS! Ready for virtual playlist creation")
_beatport_log("You can now create playlists for:")
_beatport_log(" • Beatport Top 100")
for genre_name in list(all_genre_results.keys())[:5]:
_beatport_log(f"{genre_name} Top 100")
if len(all_genre_results) > 5:
_beatport_log(f" • ...and {len(all_genre_results) - 5} more genres!")
_beatport_log(f"\nIntegration Notes:")
_beatport_log(f" • Artist and title data is clean and ready")
_beatport_log("\nIntegration Notes:")
_beatport_log(" • Artist and title data is clean and ready")
_beatport_log(f"{total_genres} genres confirmed working")
_beatport_log(f" • Data quality: {overall_quality['quality_score']:.1f}%")
else:
_beatport_log(f"\nData quality needs improvement ({overall_quality['quality_score']:.1f}%)")
_beatport_log(f"Consider refining extraction methods")
_beatport_log("Consider refining extraction methods")
if __name__ == "__main__":

@ -274,7 +274,7 @@ class AcoustIDClient:
if not search_dir.exists():
continue
# Walk up to 2 levels deep to find an audio file quickly
-for depth, pattern in enumerate(['*', '*/*']):
+for _depth, pattern in enumerate(['*', '*/*']):
for f in search_dir.glob(pattern):
if f.is_file() and f.suffix.lower() in audio_extensions:
return str(f)

@ -270,7 +270,7 @@ class AutomationEngine:
"""Cancel all timers on shutdown."""
self._running = False
with self._lock:
-for aid, timer in self._timers.items():
+for _aid, timer in self._timers.items():
timer.cancel()
count = len(self._timers)
self._timers.clear()

@ -348,7 +348,7 @@ class DatabaseUpdateWorker:
total_artists = len(artists)
logger.info(f"Deep scan: Processing {total_artists} artists (sequential, skip-existing mode)")
-for i, artist in enumerate(artists):
+for _i, artist in enumerate(artists):
if self.should_stop:
break
@ -599,7 +599,7 @@ class DatabaseUpdateWorker:
result_msg = f"Smart incremental scan result: {len(artists_to_process)} artists to process from {albums_with_new_content} albums with new content"
if stopped_early:
result_msg += f" (stopped early after finding 25 consecutive complete albums)"
result_msg += " (stopped early after finding 25 consecutive complete albums)"
else:
result_msg += f" (checked all {total_tracks_checked} tracks from {len(recent_albums)} recent albums)"
@ -1224,7 +1224,7 @@ class DatabaseUpdateWorker:
# Process artists sequentially when requested (the web server uses this path).
if self.force_sequential:
# Sequential processing for web server mode
-for i, artist in enumerate(artists):
+for _i, artist in enumerate(artists):
if self.should_stop:
break

@ -72,11 +72,11 @@ def _decrypt_chunk(chunk: bytes, key: bytes) -> bytes:
cipher = Cipher(algorithms.Blowfish(key), modes.CBC(iv))
decryptor = cipher.decryptor()
return decryptor.update(chunk) + decryptor.finalize()
-except ImportError:
+except ImportError as exc:
raise ImportError(
"Deezer downloads require pycryptodome or cryptography package. "
"Install with: pip install pycryptodome"
-)
+) from exc
class DeezerDownloadClient:

@ -87,7 +87,7 @@ class DownloadOrchestrator:
# Reload underlying client configs (SLSKD URL, API key, etc.)
if self.soulseek:
self.soulseek._setup_client()
logger.info(f"Soulseek client config reloaded")
logger.info("Soulseek client config reloaded")
# Reconnect Deezer if ARL changed
deezer_arl = config_manager.get('deezer_download.arl', '')

@ -668,7 +668,7 @@ class HiFiClient:
"""Get all active downloads (Soulseek-compatible)."""
statuses = []
with self._download_lock:
-for dl_id, info in self.active_downloads.items():
+for _dl_id, info in self.active_downloads.items():
statuses.append(DownloadStatus(
id=info['id'],
filename=info['filename'],

@ -1555,9 +1555,9 @@ class JellyfinClient:
logger.info(f"Creating backup playlist '{backup_name}' before sync")
if self.copy_playlist(playlist_name, backup_name):
logger.info(f"Backup created successfully")
logger.info("Backup created successfully")
else:
logger.warning(f"Failed to create backup, continuing with sync")
logger.warning("Failed to create backup, continuing with sync")
if existing_playlist:
# Delete existing playlist using DELETE request

@ -311,7 +311,7 @@ class LidarrDownloadClient:
}
# Check if album already exists
-existing = self._api_get(f'album', params={'foreignAlbumId': album.get('foreignAlbumId', '')})
+existing = self._api_get('album', params={'foreignAlbumId': album.get('foreignAlbumId', '')})
if existing and isinstance(existing, list) and len(existing) > 0:
lidarr_album_id = existing[0].get('id')
# Trigger search for existing album

@ -997,9 +997,9 @@ class NavidromeClient:
# We only need to backup once, even if duplicates exist
if self.copy_playlist(playlist_name, backup_name):
logger.info(f"Backup created successfully")
logger.info("Backup created successfully")
else:
logger.warning(f"Failed to create backup, continuing with sync")
logger.warning("Failed to create backup, continuing with sync")
# STRATEGY: Update the first match, delete the rest
if existing_playlists:

@ -342,7 +342,7 @@ class PlexClient:
if valid_tracks:
# Debug the track objects before creating playlist
logger.debug(f"About to create playlist with tracks:")
logger.debug("About to create playlist with tracks:")
for i, track in enumerate(valid_tracks):
logger.debug(f" Track {i+1}: {track.title} (type: {type(track)}, ratingKey: {track.ratingKey})")
@ -378,7 +378,7 @@ class PlexClient:
return True
except Exception as final_error:
logger.error(f"Final playlist creation attempt failed: {final_error}")
-raise create_error
+raise create_error from final_error
else:
logger.error(f"No valid tracks with ratingKeys for playlist '{name}'")
return False
@ -464,9 +464,9 @@ class PlexClient:
logger.info(f"Creating backup playlist '{backup_name}' before sync")
if self.copy_playlist(playlist_name, backup_name):
logger.info(f"Backup created successfully")
logger.info("Backup created successfully")
else:
logger.warning(f"Failed to create backup, continuing with sync")
logger.warning("Failed to create backup, continuing with sync")
# Delete original and recreate
existing_playlist.delete()

@ -361,7 +361,7 @@ class QobuzClient:
if is_valid:
logger.debug(f"Secret test passed (HTTP {resp.status_code})")
else:
logger.debug(f"Secret test failed (HTTP 400 — invalid signature)")
logger.debug("Secret test failed (HTTP 400 — invalid signature)")
return is_valid
except Exception as e:
@ -1105,7 +1105,7 @@ class QobuzClient:
download_statuses = []
with self._download_lock:
-for download_id, info in self.active_downloads.items():
+for _download_id, info in self.active_downloads.items():
status = DownloadStatus(
id=info['id'],
filename=info['filename'],

@ -111,7 +111,7 @@ class DuplicateDetectorJob(RepairJob):
if context.report_progress:
context.report_progress(phase=f'Comparing {total} tracks...', total=total)
-for bucket_key, bucket_tracks in buckets.items():
+for _bucket_key, bucket_tracks in buckets.items():
if context.check_stop():
return result

@ -345,7 +345,7 @@ class LibraryReorganizeJob(RepairJob):
# API fallback: find (artist, album) pairs still missing year, batch-lookup
if needs_year and db_album_years is not None:
missing_pairs = set()
-for fpath, tags in file_tags.items():
+for _fpath, tags in file_tags.items():
year = tags.get('year', '')
if year:
continue

@ -282,7 +282,7 @@ class TrackNumberRepairJob(RepairJob):
album_name = None
artist_name = None
-for fpath, fname, _ in file_track_data:
+for fpath, _fname, _ in file_track_data:
if 'spotify' not in source_album_ids or 'itunes' not in source_album_ids:
aid, source = _read_album_id_from_file(fpath)
if aid and source in ('spotify', 'itunes') and source not in source_album_ids:

@ -483,7 +483,7 @@ class RepairWorker:
best_job_id = None
best_staleness = -1
-for job_id, job in self._jobs.items():
+for job_id, _job in self._jobs.items():
config = self.get_job_config(job_id)
if not config['enabled']:
continue
@ -1047,7 +1047,7 @@ class RepairWorker:
self._cleanup_empty_parents(resolved)
return {'success': True, 'action': 'moved_to_staging',
-'message': f'Moved to staging folder for import'}
+'message': 'Moved to staging folder for import'}
elif fix_action == 'delete':
os.remove(resolved)

@ -68,10 +68,10 @@ def analyze_track(file_path: str) -> Tuple[float, float]:
text=True,
timeout=120
)
-except FileNotFoundError:
-raise FileNotFoundError("ffmpeg not found on PATH")
-except subprocess.TimeoutExpired:
-raise RuntimeError("ffmpeg timed out analyzing track")
+except FileNotFoundError as exc:
+raise FileNotFoundError("ffmpeg not found on PATH") from exc
+except subprocess.TimeoutExpired as exc:
+raise RuntimeError("ffmpeg timed out analyzing track") from exc
stderr = result.stderr

@ -380,8 +380,8 @@ class SeasonalDiscoveryService:
cursor = conn.cursor()
# Build keyword search query
keyword_conditions = " OR ".join([f"LOWER(track_name) LIKE ?" for _ in keywords])
keyword_conditions += " OR " + " OR ".join([f"LOWER(album_name) LIKE ?" for _ in keywords])
keyword_conditions = " OR ".join(["LOWER(track_name) LIKE ?" for _ in keywords])
keyword_conditions += " OR " + " OR ".join(["LOWER(album_name) LIKE ?" for _ in keywords])
keyword_params = [f"%{kw}%" for kw in keywords] + [f"%{kw}%" for kw in keywords]
@ -840,7 +840,7 @@ class SeasonalDiscoveryService:
tracks_by_artist[artist].append(track)
balanced_tracks = []
-for artist, artist_tracks in tracks_by_artist.items():
+for _artist, artist_tracks in tracks_by_artist.items():
# Sort by popularity and take top 3
sorted_tracks = sorted(artist_tracks, key=lambda t: t.get('popularity', 50), reverse=True)
balanced_tracks.extend(sorted_tracks[:3])

@ -369,7 +369,7 @@ class SoulseekClient:
logger.debug(f"API request returned 404 (Not Found) for {url}")
elif response.status == 401:
if not getattr(self, '_last_401_logged', False):
logger.warning(f"slskd authentication failed (401) — check API key. Suppressing further 401 errors.")
logger.warning("slskd authentication failed (401) — check API key. Suppressing further 401 errors.")
self._last_401_logged = True
logger.debug(f"API request 401 for {url}")
else:
@ -823,7 +823,7 @@ class SoulseekClient:
logger.debug(f"No ID in response, using filename as fallback: {response}")
return filename
else:
logger.debug(f"Web interface endpoint returned no response")
logger.debug("Web interface endpoint returned no response")
except Exception as e:
logger.debug(f"Web interface endpoint failed: {e}")
@ -1044,7 +1044,7 @@ class SoulseekClient:
# Fallback: if download_id looks like a filename (contains path separators),
# list all transfers, find by filename, and cancel with the real transfer ID
if '\\' in download_id or '/' in download_id:
logger.debug(f"Download ID looks like a filename, trying filename-based lookup fallback")
logger.debug("Download ID looks like a filename, trying filename-based lookup fallback")
try:
downloads = await self.get_all_downloads()
target_basename = os.path.basename(download_id.replace('\\', '/'))
@ -1056,7 +1056,7 @@ class SoulseekClient:
logger.debug(f"Found matching transfer with real ID, trying: {fallback_endpoint}")
response = await self._make_request('DELETE', fallback_endpoint)
if response is not None:
logger.info(f"Successfully cancelled download via filename fallback")
logger.info("Successfully cancelled download via filename fallback")
return True
except Exception as fallback_error:
logger.debug(f"Filename fallback failed: {fallback_error}")
@ -1646,10 +1646,10 @@ class SoulseekClient:
logger.info(f"Quality Filter: Bit depth 24-bit preference — {len(hi_res)}/{len(quality_buckets['flac'])} FLAC candidates are hi-res")
quality_buckets['flac'] = hi_res
elif not bit_depth_fallback:
logger.info(f"Quality Filter: No 24-bit FLAC found and fallback disabled — rejecting all FLAC")
logger.info("Quality Filter: No 24-bit FLAC found and fallback disabled — rejecting all FLAC")
quality_buckets['flac'] = []
else:
logger.info(f"Quality Filter: No 24-bit FLAC found — falling back to 16-bit")
logger.info("Quality Filter: No 24-bit FLAC found — falling back to 16-bit")
elif bit_depth_pref == '16':
lo_res = [c for c in quality_buckets['flac']
@ -1658,10 +1658,10 @@ class SoulseekClient:
logger.info(f"Quality Filter: Bit depth 16-bit preference — {len(lo_res)}/{len(quality_buckets['flac'])} FLAC candidates are standard")
quality_buckets['flac'] = lo_res
elif not bit_depth_fallback:
logger.info(f"Quality Filter: No 16-bit FLAC found and fallback disabled — rejecting all FLAC")
logger.info("Quality Filter: No 16-bit FLAC found and fallback disabled — rejecting all FLAC")
quality_buckets['flac'] = []
else:
logger.info(f"Quality Filter: No 16-bit FLAC found — falling back to 24-bit")
logger.info("Quality Filter: No 16-bit FLAC found — falling back to 24-bit")
# Debug logging
for quality, bucket in quality_buckets.items():
@ -1688,16 +1688,16 @@ class SoulseekClient:
# If no enabled qualities matched, check if fallback is enabled
if profile.get('fallback_enabled', True):
logger.warning(f"Quality Filter: No enabled qualities matched, falling back to density-filtered candidates")
logger.warning("Quality Filter: No enabled qualities matched, falling back to density-filtered candidates")
if density_filtered_all:
density_filtered_all.sort(key=lambda x: (x.quality_score, self._calculate_effective_kbps(x.size, x.duration) or 0), reverse=True)
logger.info(f"Quality Filter: Returning {len(density_filtered_all)} fallback candidates (bitrate-filtered, any quality)")
return density_filtered_all
else:
logger.warning(f"Quality Filter: All candidates failed bitrate checks, returning empty (respecting constraints)")
logger.warning("Quality Filter: All candidates failed bitrate checks, returning empty (respecting constraints)")
return []
else:
logger.warning(f"Quality Filter: No enabled qualities matched and fallback is disabled, returning empty")
logger.warning("Quality Filter: No enabled qualities matched and fallback is disabled, returning empty")
return []
async def get_session_info(self) -> Optional[Dict[str, Any]]:

@ -260,7 +260,7 @@ class SoulSyncClient:
file_entries = [] # (file_path, tags)
scanned = 0
-for root, dirs, files in os.walk(self._transfer_path):
+for root, _dirs, files in os.walk(self._transfer_path):
for filename in files:
ext = os.path.splitext(filename)[1].lower()
if ext not in AUDIO_EXTENSIONS:

@ -318,7 +318,7 @@ def rate_limited(func):
# If Retry-After is long, activate global ban instead of sleeping
if delay and delay > _LONG_RATE_LIMIT_THRESHOLD:
_set_global_rate_limit(delay, func.__name__, has_real_header=True)
-raise SpotifyRateLimitError(delay, func.__name__)
+raise SpotifyRateLimitError(delay, func.__name__) from e
if delay:
delay = delay + 1

@ -800,7 +800,7 @@ class TidalClient:
)
if response.status_code == 429:
raise Exception(f"Rate limited (429) on search_artist")
raise Exception("Rate limited (429) on search_artist")
if response.status_code == 200:
data = response.json()
# JSON:API format: included artists in 'artists' or nested in relationships
@ -859,7 +859,7 @@ class TidalClient:
)
if response.status_code == 429:
raise Exception(f"Rate limited (429) on search_album")
raise Exception("Rate limited (429) on search_album")
if response.status_code == 200:
data = response.json()
items = []
@ -925,7 +925,7 @@ class TidalClient:
)
if response.status_code == 429:
raise Exception(f"Rate limited (429) on search_track")
raise Exception("Rate limited (429) on search_track")
if response.status_code == 200:
data = response.json()
items = []
@ -984,7 +984,7 @@ class TidalClient:
)
if response.status_code == 429:
raise Exception(f"Rate limited (429) on get_artist")
raise Exception("Rate limited (429) on get_artist")
if response.status_code == 200:
data = response.json()
# Handle JSON:API format
@ -1018,7 +1018,7 @@ class TidalClient:
)
if response.status_code == 429:
raise Exception(f"Rate limited (429) on get_album")
raise Exception("Rate limited (429) on get_album")
if response.status_code == 200:
data = response.json()
if 'data' in data and 'attributes' in data.get('data', {}):
@ -1051,7 +1051,7 @@ class TidalClient:
)
if response.status_code == 429:
raise Exception(f"Rate limited (429) on get_track")
raise Exception("Rate limited (429) on get_track")
if response.status_code == 200:
data = response.json()
if 'data' in data and 'attributes' in data.get('data', {}):
@ -1132,7 +1132,7 @@ class TidalClient:
break
if not tracks_page or not tracks_page.get("data"):
logger.info(f"No more tracks found, stopping pagination")
logger.info("No more tracks found, stopping pagination")
break
# Reset failure counter on success

@ -675,7 +675,7 @@ class TidalDownloadClient:
download_statuses = []
with self._download_lock:
-for download_id, info in self.active_downloads.items():
+for _download_id, info in self.active_downloads.items():
status = DownloadStatus(
id=info['id'],
filename=info['filename'],

@ -1117,7 +1117,7 @@ class WatchlistScanner:
albums = discography_result.albums
source_artist_id = discography_result.artist_id
artist_image_url = discography_result.image_url or self.get_artist_image_url(artist) or ''
-album_fetcher = lambda album_id, album_name='': self._get_album_data_for_source(source, album_id, album_name)
+album_fetcher = lambda album_id, album_name='', source=source: self._get_album_data_for_source(source, album_id, album_name)
absolute_index = artist_index_offset + i + 1
if scan_state is not None:
@ -1379,7 +1379,7 @@ class WatchlistScanner:
rescan_cutoff = self._get_rescan_cutoff()
if rescan_cutoff == 'all':
if self._rescan_cutoff_log_marker != 'all':
logger.info(f"Lookback period changed to 'all' — returning full discography")
logger.info("Lookback period changed to 'all' — returning full discography")
self._rescan_cutoff_log_marker = 'all'
cutoff_timestamp = None
needs_full_discog = True
@ -1605,7 +1605,7 @@ class WatchlistScanner:
if hasattr(self, '_metadata_service') and self._metadata_service:
results = self._metadata_service.itunes.search_artists(artist_name, limit=5)
else:
logger.warning(f"Cannot match to iTunes - MetadataService not available")
logger.warning("Cannot match to iTunes - MetadataService not available")
return None
return self._best_artist_match(results, artist_name)
@ -2856,11 +2856,11 @@ class WatchlistScanner:
cache_callback = None
if source == 'spotify':
-cache_callback = lambda found_id, watchlist_id=artist.id: self._cache_watchlist_artist_source_id(artist, 'spotify', found_id)
+cache_callback = lambda found_id, watchlist_id=artist.id, artist=artist: self._cache_watchlist_artist_source_id(artist, 'spotify', found_id)
elif source == 'itunes':
-cache_callback = lambda found_id, watchlist_id=artist.id: self._cache_watchlist_artist_source_id(artist, 'itunes', found_id)
+cache_callback = lambda found_id, watchlist_id=artist.id, artist=artist: self._cache_watchlist_artist_source_id(artist, 'itunes', found_id)
elif source == 'deezer':
-cache_callback = lambda found_id, watchlist_id=artist.id: self._cache_watchlist_artist_source_id(artist, 'deezer', found_id)
+cache_callback = lambda found_id, watchlist_id=artist.id, artist=artist: self._cache_watchlist_artist_source_id(artist, 'deezer', found_id)
artist_id = self._resolve_artist_id_for_source(
source,
@ -3100,11 +3100,11 @@ class WatchlistScanner:
stored_id = getattr(artist, source_attr, None) if source_attr else None
cache_callback = None
if source == 'spotify':
-cache_callback = lambda found_id, watchlist_id=artist.id: self._cache_watchlist_artist_source_id(artist, 'spotify', found_id)
+cache_callback = lambda found_id, watchlist_id=artist.id, artist=artist: self._cache_watchlist_artist_source_id(artist, 'spotify', found_id)
elif source == 'itunes':
-cache_callback = lambda found_id, watchlist_id=artist.id: self._cache_watchlist_artist_source_id(artist, 'itunes', found_id)
+cache_callback = lambda found_id, watchlist_id=artist.id, artist=artist: self._cache_watchlist_artist_source_id(artist, 'itunes', found_id)
elif source == 'deezer':
-cache_callback = lambda found_id, watchlist_id=artist.id: self._cache_watchlist_artist_source_id(artist, 'deezer', found_id)
+cache_callback = lambda found_id, watchlist_id=artist.id, artist=artist: self._cache_watchlist_artist_source_id(artist, 'deezer', found_id)
artist_id = self._resolve_artist_id_for_source(
source,
@ -3428,7 +3428,7 @@ class WatchlistScanner:
# Balance by artist - max 6 tracks per artist
balanced_track_data = []
-for artist, tracks in artist_track_data.items():
+for _artist, tracks in artist_track_data.items():
sorted_tracks = sorted(tracks, key=lambda t: t['score'], reverse=True)
balanced_track_data.extend(sorted_tracks[:6])

@ -39,7 +39,7 @@ class WishlistService:
# Extract Spotify track data from the track_info structure
spotify_track = self._extract_spotify_track_from_modal_info(track_info)
if not spotify_track:
logger.error(f"Could not extract Spotify track data from modal info")
logger.error("Could not extract Spotify track data from modal info")
return False
# Get failure reason from track_info if available

@ -26,8 +26,8 @@ from enum import Enum
try:
import yt_dlp
-except ImportError:
-raise ImportError("yt-dlp is required. Install with: pip install yt-dlp")
+except ImportError as exc:
+raise ImportError("yt-dlp is required. Install with: pip install yt-dlp") from exc
from utils.logging_config import get_logger
from core.matching_engine import MusicMatchingEngine
@ -382,7 +382,7 @@ class YouTubeClient:
# If we already have both locally, use them
if ffmpeg_path.exists() and ffprobe_path.exists():
logger.info(f"Found ffmpeg and ffprobe in tools folder")
logger.info("Found ffmpeg and ffprobe in tools folder")
# Add to PATH so yt-dlp can find them
tools_dir_str = str(tools_dir.absolute())
os.environ['PATH'] = tools_dir_str + os.pathsep + os.environ.get('PATH', '')
@ -397,10 +397,10 @@ class YouTubeClient:
url = 'https://github.com/BtbN/FFmpeg-Builds/releases/download/latest/ffmpeg-master-latest-win64-gpl.zip'
zip_path = tools_dir / 'ffmpeg.zip'
logger.info(f" Downloading from GitHub (this may take a minute)...")
logger.info(" Downloading from GitHub (this may take a minute)...")
urllib.request.urlretrieve(url, zip_path)
logger.info(f" Extracting ffmpeg.exe and ffprobe.exe...")
logger.info(" Extracting ffmpeg.exe and ffprobe.exe...")
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
# Extract ffmpeg.exe and ffprobe.exe from the bin folder
for file in zip_ref.namelist():
@ -418,10 +418,10 @@ class YouTubeClient:
url = 'https://github.com/BtbN/FFmpeg-Builds/releases/download/latest/ffmpeg-master-latest-linux64-gpl.tar.xz'
tar_path = tools_dir / 'ffmpeg.tar.xz'
logger.info(f" Downloading from GitHub (this may take a minute)...")
logger.info(" Downloading from GitHub (this may take a minute)...")
urllib.request.urlretrieve(url, tar_path)
logger.info(f" Extracting ffmpeg and ffprobe...")
logger.info(" Extracting ffmpeg and ffprobe...")
with tarfile.open(tar_path, 'r:xz') as tar_ref:
for member in tar_ref.getmembers():
if member.name.endswith('bin/ffmpeg'):
@ -437,17 +437,17 @@ class YouTubeClient:
elif system == 'darwin':
# Download Mac ffmpeg and ffprobe (static builds)
logger.info(f" Downloading ffmpeg from evermeet.cx...")
logger.info(" Downloading ffmpeg from evermeet.cx...")
ffmpeg_url = 'https://evermeet.cx/ffmpeg/getrelease/zip'
ffmpeg_zip = tools_dir / 'ffmpeg.zip'
urllib.request.urlretrieve(ffmpeg_url, ffmpeg_zip)
logger.info(f" Downloading ffprobe from evermeet.cx...")
logger.info(" Downloading ffprobe from evermeet.cx...")
ffprobe_url = 'https://evermeet.cx/ffmpeg/getrelease/ffprobe/zip'
ffprobe_zip = tools_dir / 'ffprobe.zip'
urllib.request.urlretrieve(ffprobe_url, ffprobe_zip)
logger.info(f" Extracting ffmpeg and ffprobe...")
logger.info(" Extracting ffmpeg and ffprobe...")
with zipfile.ZipFile(ffmpeg_zip, 'r') as zip_ref:
zip_ref.extract('ffmpeg', tools_dir)
with zipfile.ZipFile(ffprobe_zip, 'r') as zip_ref:
@ -473,10 +473,10 @@ class YouTubeClient:
except Exception as e:
logger.error(f"Failed to download ffmpeg: {e}")
logger.error(f" Please install manually:")
logger.error(f" Windows: scoop install ffmpeg")
logger.error(f" Linux: sudo apt install ffmpeg")
logger.error(f" Mac: brew install ffmpeg")
logger.error(" Please install manually:")
logger.error(" Windows: scoop install ffmpeg")
logger.error(" Linux: sudo apt install ffmpeg")
logger.error(" Mac: brew install ffmpeg")
return False
def _youtube_to_track_result(self, entry: dict, best_audio: Optional[dict] = None) -> TrackResult:
@ -1057,7 +1057,7 @@ class YouTubeClient:
# Check if it's a 403 error
if '403' in error_msg or 'Forbidden' in error_msg:
if attempt < max_retries - 1:
logger.info(f"Waiting 2 seconds before retry...")
logger.info("Waiting 2 seconds before retry...")
import time
time.sleep(2)
continue # Retry on 403
@ -1148,7 +1148,7 @@ class YouTubeClient:
download_statuses = []
with self._download_lock:
-for download_id, download_info in self.active_downloads.items():
+for _download_id, download_info in self.active_downloads.items():
status = DownloadStatus(
id=download_info['id'],
filename=download_info['filename'],
@ -1276,11 +1276,11 @@ class YouTubeClient:
if audio.tags is not None:
# Delete ALL existing frames
audio.tags.clear()
logger.debug(f" Cleared all existing tag frames")
logger.debug(" Cleared all existing tag frames")
else:
# No tags exist, add them
audio.add_tags()
logger.debug(f" Added new tag structure")
logger.debug(" Added new tag structure")
if spotify_track:
# Use Spotify metadata
@ -1304,7 +1304,7 @@ class YouTubeClient:
except:
pass
logger.debug(f" Setting metadata tags...")
logger.debug(" Setting metadata tags...")
# Set ID3 tags (using setall to ensure they're set)
audio.tags.setall('TIT2', [TIT2(encoding=3, text=title)])
@ -1337,7 +1337,7 @@ class YouTubeClient:
logger.debug(f" Year: {year}")
# Fetch and embed album art from Spotify (via search)
logger.debug(f" Fetching album art from Spotify...")
logger.debug(" Fetching album art from Spotify...")
album_art_url = self._get_spotify_album_art(spotify_track)
if album_art_url:
@ -1367,11 +1367,11 @@ class YouTubeClient:
except Exception as art_error:
logger.warning(f" Could not embed album art: {art_error}")
else:
logger.warning(f" No album art found on Spotify")
logger.warning(" No album art found on Spotify")
# Save all tags
audio.save()
logger.info(f"Metadata enhanced successfully")
logger.info("Metadata enhanced successfully")
# Return album art URL for cover.jpg creation
return album_art_url
@ -1415,10 +1415,10 @@ class YouTubeClient:
# Don't overwrite existing cover art
if cover_path.exists():
logger.debug(f" cover.jpg already exists, skipping")
logger.debug(" cover.jpg already exists, skipping")
return
logger.debug(f" Downloading cover.jpg...")
logger.debug(" Downloading cover.jpg...")
response = requests.get(album_art_url, timeout=10)
response.raise_for_status()
@ -1440,10 +1440,10 @@ class YouTubeClient:
from core.lyrics_client import lyrics_client
if not lyrics_client.api:
logger.debug(f" LRClib API not available - skipping lyrics")
logger.debug(" LRClib API not available - skipping lyrics")
return
logger.debug(f" Fetching lyrics from LRClib...")
logger.debug(" Fetching lyrics from LRClib...")
# Get track metadata
artist_name = spotify_track.artists[0] if spotify_track.artists else "Unknown Artist"
@ -1461,12 +1461,12 @@ class YouTubeClient:
)
if success:
logger.debug(f" Created .lrc lyrics file")
logger.debug(" Created .lrc lyrics file")
else:
logger.debug(f" No lyrics found on LRClib")
logger.debug(" No lyrics found on LRClib")
except ImportError:
logger.debug(f" lyrics_client not available - skipping lyrics")
logger.debug(" lyrics_client not available - skipping lyrics")
except Exception as e:
logger.warning(f" Could not create lyrics file: {e}")

@ -163,7 +163,6 @@ class MusicDatabase:
"""SQLite database manager for SoulSync music library data"""
def __init__(self, database_path: str = None):
-import os
# Use env var if path is None OR if it's the default path
# This ensures Docker containers use the correct mounted volume location
if database_path is None or database_path == "database/music_library.db":
@ -1204,7 +1203,7 @@ class MusicDatabase:
cursor.execute(f"INSERT OR IGNORE INTO discovery_recent_albums_new ({cols_str}) SELECT {cols_str} FROM discovery_recent_albums")
cursor.execute("DROP TABLE discovery_recent_albums")
cursor.execute("ALTER TABLE discovery_recent_albums_new RENAME TO discovery_recent_albums")
-conn.commit()
+cursor.connection.commit()
logger.info("Successfully migrated discovery_recent_albums table for iTunes support")
# Migration: Add UNIQUE constraint to similar_artists table
@ -3526,7 +3525,6 @@ class MusicDatabase:
def get_db_storage_stats(self):
"""Get database storage breakdown by table."""
-import os
conn = None
try:
# Total file size
@ -3989,7 +3987,7 @@ class MusicDatabase:
conn.commit()
return cursor.rowcount > 0
except sqlite3.IntegrityError:
logger.warning(f"Profile update failed (duplicate name?)")
logger.warning("Profile update failed (duplicate name?)")
return False
except Exception as e:
logger.error(f"Error updating profile {profile_id}: {e}")
@ -5547,7 +5545,7 @@ class MusicDatabase:
u_words = uncensored.lower().split()
if len(c_words) == len(u_words):
all_match = True
-for cw, uw in zip(c_words, u_words):
+for cw, uw in zip(c_words, u_words, strict=False):
if '*' in cw:
# Strip asterisks to get the visible prefix/suffix
# "b*****t" → prefix "b", suffix "t"
@ -5675,7 +5673,6 @@ class MusicDatabase:
def _get_album_formats(self, cursor, sibling_ids: list) -> List[str]:
"""Get distinct format strings for tracks in the given album IDs."""
-import os
try:
placeholders = ','.join('?' for _ in sibling_ids)
cursor.execute(f"""
@ -6178,7 +6175,7 @@ class MusicDatabase:
# Debug logging for Unicode normalization
if search_title != search_title_norm or search_artist != search_artist_norm or \
db_track.title != db_title_norm or db_track.artist_name != db_artist_norm:
logger.debug(f"Unicode normalization:")
logger.debug("Unicode normalization:")
logger.debug(f" Search: '{search_title}''{search_title_norm}' | '{search_artist}''{search_artist_norm}'")
logger.debug(f" Database: '{db_track.title}''{db_title_norm}' | '{db_track.artist_name}''{db_artist_norm}'")
@ -10026,7 +10023,7 @@ class MusicDatabase:
ORDER BY g.artist_name ASC, g.created_at DESC
""")
columns = [desc[0] for desc in cursor.description]
-return [dict(zip(columns, row)) for row in cursor.fetchall()]
+return [dict(zip(columns, row, strict=False)) for row in cursor.fetchall()]
except Exception as e:
logger.error(f"Error getting retag groups: {e}")
return []
@ -10042,7 +10039,7 @@ class MusicDatabase:
ORDER BY disc_number ASC, track_number ASC
""", (group_id,))
columns = [desc[0] for desc in cursor.description]
-return [dict(zip(columns, row)) for row in cursor.fetchall()]
+return [dict(zip(columns, row, strict=False)) for row in cursor.fetchall()]
except Exception as e:
logger.error(f"Error getting retag tracks: {e}")
return []
@ -11165,7 +11162,7 @@ class MusicDatabase:
LIMIT ? OFFSET ?
""", (automation_id, limit, offset))
cols = [d[0] for d in cursor.description]
-rows = [dict(zip(cols, row)) for row in cursor.fetchall()]
+rows = [dict(zip(cols, row, strict=False)) for row in cursor.fetchall()]
return {'history': rows, 'total': total}
except Exception as e:
logger.error(f"Error getting automation run history for {automation_id}: {e}")
@ -11534,7 +11531,6 @@ def get_database(database_path: str = None) -> MusicDatabase:
database_path: Path to database file. If None or default path, uses DATABASE_PATH env var
or defaults to "database/music_library.db". Custom paths are used as-is.
"""
-import os
# Use env var if path is None OR if it's the default path
# This ensures Docker containers use the correct mounted volume location
if database_path is None or database_path == "database/music_library.db":
@ -11553,7 +11549,7 @@ def close_database():
with _database_lock:
# Close all database instances
-for thread_id, db_instance in list(_database_instances.items()):
+for _thread_id, db_instance in list(_database_instances.items()):
try:
db_instance.close()
except Exception as e:

@ -294,7 +294,7 @@ class PlaylistSyncService:
# Use active media server for playlist sync
media_client, server_type = self._get_active_media_client()
if not media_client:
logger.error(f"No active media client available for playlist sync")
logger.error("No active media client available for playlist sync")
sync_success = False
else:
logger.info(f"Syncing playlist '{playlist.name}' to {server_type.upper()} server")
@ -615,7 +615,7 @@ class PlaylistSyncService:
try:
media_client, server_type = self._get_active_media_client()
if not media_client:
logger.error(f"No active media client available")
logger.error("No active media client available")
return []
if hasattr(media_client, 'search_tracks'):
@ -714,7 +714,7 @@ class PlaylistSyncService:
media_client, server_type = self._get_active_media_client()
if not media_client:
return {"error": f"No active media client available"}
return {"error": "No active media client available"}
media_playlists = media_client.get_all_playlists() if hasattr(media_client, 'get_all_playlists') else []
media_stats = media_client.get_library_stats() if hasattr(media_client, 'get_library_stats') else {}

[File diff suppressed because it is too large]