Refactor Plex Database Repair Logic

- Created a centralized database repair script (`plex-database-repair.sh`) to handle all database integrity checks and repairs for Plex Media Server.
- Updated the main Plex management script (`plex.sh`) to integrate the new repair functionality and fixed Unicode/ASCII display issues.
- Refactored the backup script (`backup-plex.sh`) to remove duplicate repair functions and ensure it utilizes the new repair script.
- Conducted thorough code validation and functional testing to ensure all scripts operate correctly with the new changes.
- Enhanced documentation for the new repair script, detailing usage, features, and integration points with other scripts.
- Fixed critical bugs related to WAL file handling and corrected typos in script options.
This commit is contained in:
Peter Wood
2025-06-21 06:30:07 -04:00
parent d066f32b10
commit 2bc9e91229
5 changed files with 1387 additions and 938 deletions

View File

@@ -97,7 +97,7 @@ while [[ $# -gt 0 ]]; do
shift
;;
--disable-auto-repair)
AUTO_REPAIR=false.service
AUTO_REPAIR=false
shift
;;
--non-interactive)
@@ -169,7 +169,12 @@ log_message() {
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${CYAN}[${timestamp}]${NC} ${message}"
mkdir -p "${LOCAL_LOG_ROOT}"
echo "[${timestamp}] ${message}" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
# Ensure acedanger owns the log directory
sudo chown acedanger:acedanger "${LOCAL_LOG_ROOT}" 2>/dev/null || true
local log_file="${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log"
echo "[${timestamp}] ${message}" >> "$log_file" 2>/dev/null || true
# Ensure acedanger owns the log file
sudo chown acedanger:acedanger "$log_file" 2>/dev/null || true
}
log_error() {
@@ -178,7 +183,10 @@ log_error() {
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}"
mkdir -p "${LOCAL_LOG_ROOT}"
echo "[${timestamp}] ERROR: ${message}" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
local log_file="${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log"
echo "[${timestamp}] ERROR: ${message}" >> "$log_file" 2>/dev/null || true
# Ensure acedanger owns the log file
sudo chown acedanger:acedanger "$log_file" 2>/dev/null || true
}
log_success() {
@@ -187,7 +195,10 @@ log_success() {
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}"
mkdir -p "$LOCAL_LOG_ROOT"
echo "[${timestamp}] SUCCESS: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
local log_file="${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log"
echo "[${timestamp}] SUCCESS: $message" >> "$log_file" 2>/dev/null || true
# Ensure acedanger owns the log file
sudo chown acedanger:acedanger "$log_file" 2>/dev/null || true
}
log_warning() {
@@ -196,7 +207,10 @@ log_warning() {
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}"
mkdir -p "$LOCAL_LOG_ROOT"
echo "[${timestamp}] WARNING: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
local log_file="${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log"
echo "[${timestamp}] WARNING: $message" >> "$log_file" 2>/dev/null || true
# Ensure acedanger owns the log file
sudo chown acedanger:acedanger "$log_file" 2>/dev/null || true
}
log_info() {
@@ -205,7 +219,10 @@ log_info() {
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${BLUE}[${timestamp}] INFO:${NC} ${message}"
mkdir -p "$LOCAL_LOG_ROOT"
echo "[${timestamp}] INFO: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
local log_file="${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log"
echo "[${timestamp}] INFO: $message" >> "$log_file" 2>/dev/null || true
# Ensure acedanger owns the log file
sudo chown acedanger:acedanger "$log_file" 2>/dev/null || true
}
# Performance tracking functions
@@ -222,7 +239,11 @@ track_performance() {
# Initialize performance log if it doesn't exist
if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then
mkdir -p "$(dirname "$PERFORMANCE_LOG_FILE")"
# Ensure acedanger owns the log directory
sudo chown -R acedanger:acedanger "$(dirname "$PERFORMANCE_LOG_FILE")" 2>/dev/null || true
echo "[]" > "$PERFORMANCE_LOG_FILE"
# Ensure acedanger owns the performance log file
sudo chown acedanger:acedanger "$PERFORMANCE_LOG_FILE" 2>/dev/null || true
fi
# Add performance entry
@@ -243,6 +264,8 @@ track_performance() {
jq --argjson entry "$entry" '. += [$entry]' "$PERFORMANCE_LOG_FILE" > "${PERFORMANCE_LOG_FILE}.tmp" && \
mv "${PERFORMANCE_LOG_FILE}.tmp" "$PERFORMANCE_LOG_FILE"
# Ensure acedanger owns the performance log file
sudo chown acedanger:acedanger "$PERFORMANCE_LOG_FILE" 2>/dev/null || true
log_info "Performance: $operation completed in ${duration}s"
}
@@ -250,8 +273,12 @@ track_performance() {
# Initialize log directory
initialize_logs() {
mkdir -p "$(dirname "$PERFORMANCE_LOG_FILE")"
# Ensure acedanger owns the log directory
sudo chown -R acedanger:acedanger "${LOCAL_LOG_ROOT}" 2>/dev/null || true
if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then
echo "[]" > "$PERFORMANCE_LOG_FILE"
# Ensure acedanger owns the performance log file
sudo chown acedanger:acedanger "$PERFORMANCE_LOG_FILE" 2>/dev/null || true
log_message "Initialized performance log file"
fi
}
@@ -502,42 +529,150 @@ calculate_checksum() {
return 1
}
# Check database integrity using Plex SQLite
# WAL file handling for backup operations (different from repair-specific function)
# Dispatches on the first argument:
#   checkpoint - run "PRAGMA wal_checkpoint(TRUNCATE)" on every *.db in PLEX_FILES
#   backup     - copy any existing -wal/-shm sidecar files into the directory $2
# Globals:   PLEX_FILES (read; assoc array nickname -> path), PLEX_SQLITE (read)
# Arguments: $1 - operation ("checkpoint" or "backup")
#            $2 - backup destination directory (required for "backup")
# Returns:   0 on full success; 1 on any per-file error, a missing backup
#            path, or an unknown operation.
handle_wal_files() {
local operation="$1"
local backup_path="$2"
case "$operation" in
"checkpoint")
log_message "Performing WAL checkpoint..."
local checkpoint_errors=0
for nickname in "${!PLEX_FILES[@]}"; do
local file="${PLEX_FILES[$nickname]}"
# Only checkpoint database files
if [[ "$file" == *".db" ]] && [ -f "$file" ]; then
local db_name
db_name=$(basename "$file")
log_info "Checkpointing WAL for $db_name..."
# Perform WAL checkpoint with TRUNCATE to ensure all data is moved to main DB
if sudo "$PLEX_SQLITE" "$file" "PRAGMA wal_checkpoint(TRUNCATE);" >/dev/null 2>&1; then
log_success "WAL checkpoint completed for $db_name"
else
log_warning "WAL checkpoint failed for $db_name"
# NOTE(review): ((var++)) exits with status 1 when var was 0 and would
# abort the loop if "set -e" is enabled for this script — confirm.
((checkpoint_errors++))
fi
fi
done
if [ "$checkpoint_errors" -gt 0 ]; then
log_warning "WAL checkpoint completed with $checkpoint_errors errors"
return 1
else
log_success "All WAL checkpoints completed successfully"
return 0
fi
;;
"backup")
if [ -z "$backup_path" ]; then
log_error "Backup path required for WAL file backup"
return 1
fi
log_message "Backing up WAL and SHM files..."
local wal_files_backed_up=0
local wal_backup_errors=0
for nickname in "${!PLEX_FILES[@]}"; do
local file="${PLEX_FILES[$nickname]}"
# Only process database files
if [[ "$file" == *".db" ]] && [ -f "$file" ]; then
# SQLite sidecar files: write-ahead log and shared-memory index.
local wal_file="${file}-wal"
local shm_file="${file}-shm"
# Backup WAL file if it exists
if [ -f "$wal_file" ]; then
local wal_basename
wal_basename=$(basename "$wal_file")
local backup_file="$backup_path/$wal_basename"
if sudo cp "$wal_file" "$backup_file"; then
# Force filesystem sync to prevent corruption
sync
sudo chown plex:plex "$backup_file"
log_success "Backed up WAL file: $wal_basename"
((wal_files_backed_up++))
else
log_error "Failed to backup WAL file: $wal_basename"
((wal_backup_errors++))
fi
fi
# Backup SHM file if it exists
if [ -f "$shm_file" ]; then
local shm_basename
shm_basename=$(basename "$shm_file")
local backup_file="$backup_path/$shm_basename"
if sudo cp "$shm_file" "$backup_file"; then
# Force filesystem sync to prevent corruption
sync
sudo chown plex:plex "$backup_file"
log_success "Backed up SHM file: $shm_basename"
((wal_files_backed_up++))
else
log_error "Failed to backup SHM file: $shm_basename"
((wal_backup_errors++))
fi
fi
fi
done
if [ "$wal_files_backed_up" -gt 0 ]; then
log_success "Backed up $wal_files_backed_up WAL/SHM files"
else
# Absence of sidecar files is normal after a clean checkpoint.
log_info "No WAL/SHM files found to backup"
fi
if [ "$wal_backup_errors" -gt 0 ]; then
log_error "WAL file backup completed with $wal_backup_errors errors"
return 1
else
return 0
fi
;;
*)
log_error "Unknown WAL operation: $operation"
return 1
;;
esac
}
# Check database integrity using shared repair script
check_database_integrity() {
local db_file="$1"
local db_name
db_name=$(basename "$db_file")
local repair_script="${SCRIPT_DIR}/plex-database-repair.sh"
log_message "Checking database integrity: $db_name"
# Check if Plex SQLite exists
if [ ! -f "$PLEX_SQLITE" ]; then
log_error "Plex SQLite binary not found at: $PLEX_SQLITE"
return 1
# Check if shared repair script exists
if [[ ! -f "$repair_script" ]]; then
log_error "Database repair script not found at: $repair_script"
return 2
fi
# Make Plex SQLite executable if it isn't already
sudo chmod +x "$PLEX_SQLITE" 2>/dev/null || true
# Run integrity check
local integrity_result
integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1)
local check_exit_code=$?
if [ $check_exit_code -ne 0 ]; then
log_error "Failed to run integrity check on $db_name: $integrity_result"
return 1
fi
if echo "$integrity_result" | grep -q "^ok$"; then
# Use shared repair script for integrity checking
if "$repair_script" check "$db_file" >/dev/null 2>&1; then
log_success "Database integrity check passed: $db_name"
return 0
else
log_warning "Database integrity issues detected in $db_name:"
echo "$integrity_result" | while read -r line; do
log_warning " $line"
done
return 1
local exit_code=$?
if [[ $exit_code -eq 2 ]]; then
log_error "Critical error during integrity check for $db_name"
return 2
else
log_warning "Database integrity issues detected in $db_name"
return 1
fi
fi
}
@@ -611,563 +746,48 @@ detect_early_corruption() {
}
# Enhanced database repair with multiple recovery strategies
# Database repair using shared repair script
repair_database() {
local db_file="$1"
local force_repair="${2:-false}"
local db_name
db_name=$(basename "$db_file")
local timestamp
timestamp=$(date "+%Y-%m-%d_%H.%M.%S")
local repair_script="${SCRIPT_DIR}/plex-database-repair.sh"
log_message "Attempting to repair corrupted database: $db_name"
log_message "Starting advanced database repair for: $db_name"
# Enhanced WAL file handling for repair
handle_wal_files_for_repair "$db_file" "prepare"
# Create multiple backup copies before attempting repair
local pre_repair_backup="${db_file}.pre-repair-backup"
local working_copy="${db_file}.working-${timestamp}"
if ! sudo cp "$db_file" "$pre_repair_backup"; then
log_error "Failed to create pre-repair backup"
handle_wal_files_for_repair "$db_file" "restore"
return 1
fi
# Force filesystem sync to prevent corruption
sync
if ! sudo cp "$db_file" "$working_copy"; then
log_error "Failed to create working copy"
handle_wal_files_for_repair "$db_file" "restore"
return 1
fi
# Force filesystem sync to prevent corruption
sync
log_success "Created pre-repair backup: $(basename "$pre_repair_backup")"
# Strategy 1: Try dump and restore approach
log_message "Step 1: Database cleanup and optimization..."
if attempt_dump_restore "$working_copy" "$db_file" "$timestamp"; then
log_success "Database repaired using dump/restore method"
handle_wal_files_for_repair "$db_file" "cleanup"
cleanup_repair_files "$pre_repair_backup" "$working_copy"
return 0
fi
# Strategy 2: Try schema recreation
if attempt_schema_recreation "$working_copy" "$db_file" "$timestamp"; then
log_success "Database repaired using schema recreation"
handle_wal_files_for_repair "$db_file" "cleanup"
cleanup_repair_files "$pre_repair_backup" "$working_copy"
return 0
fi
# Strategy 3: Try recovery from previous backup
if attempt_backup_recovery "$db_file" "$BACKUP_ROOT" "$pre_repair_backup"; then
log_success "Database recovered from previous backup"
handle_wal_files_for_repair "$db_file" "cleanup"
cleanup_repair_files "$pre_repair_backup" "$working_copy"
return 0
fi
# All strategies failed - restore original and flag for manual intervention
log_error "Database repair failed. Restoring original..."
if sudo cp "$pre_repair_backup" "$db_file"; then
# Force filesystem sync to prevent corruption
sync
log_success "Original database restored"
handle_wal_files_for_repair "$db_file" "restore"
else
log_error "Failed to restore original database!"
handle_wal_files_for_repair "$db_file" "restore"
# Check if shared repair script exists
if [[ ! -f "$repair_script" ]]; then
log_error "Database repair script not found at: $repair_script"
return 2
fi
log_error "Database repair failed for $db_name"
log_warning "Will backup corrupted database - manual intervention may be needed"
cleanup_repair_files "$pre_repair_backup" "$working_copy"
return 1
}
# Strategy 1: Dump and restore approach with enhanced validation
# Repairs a corrupted database by dumping it to SQL and rebuilding a fresh
# database from that dump, validating size, table count, and the presence of
# critical Plex tables before swapping the rebuild into place.
# Globals:   PLEX_SQLITE (read)
# Arguments: $1 - working copy of the corrupted database to dump from
#            $2 - path of the original database to replace on success
#            $3 - timestamp string used to name intermediate files
# Returns:   0 if the original was replaced with a verified rebuild;
#            1 on any validation or command failure (intermediates removed).
attempt_dump_restore() {
local working_copy="$1"
local original_db="$2"
local timestamp="$3"
local dump_file="${original_db}.dump-${timestamp}.sql"
local new_db="${original_db}.repaired-${timestamp}"
log_message "Attempting repair via SQL dump/restore..."
# Try to dump the database with error checking
log_info "Creating database dump..."
if sudo "$PLEX_SQLITE" "$working_copy" ".dump" 2>/dev/null | sudo tee "$dump_file" >/dev/null; then
# Validate the dump file exists and has substantial content
if [[ ! -f "$dump_file" ]]; then
log_warning "Dump file was not created"
return 1
fi
local dump_size
dump_size=$(stat -c%s "$dump_file" 2>/dev/null || echo "0")
# 1 KiB threshold is a heuristic: any real Plex dump is far larger.
if [[ "$dump_size" -lt 1024 ]]; then
log_warning "Dump file is too small ($dump_size bytes) - likely incomplete"
sudo rm -f "$dump_file"
return 1
fi
# Check for essential database structures in dump
if ! grep -q "CREATE TABLE" "$dump_file" 2>/dev/null; then
log_warning "Dump file contains no CREATE TABLE statements - dump is incomplete"
sudo rm -f "$dump_file"
return 1
fi
# Check for critical Plex tables
local critical_tables=("schema_migrations" "accounts" "library_sections")
local missing_tables=()
for table in "${critical_tables[@]}"; do
if ! grep -q "CREATE TABLE.*$table" "$dump_file" 2>/dev/null; then
missing_tables+=("$table")
fi
done
if [[ ${#missing_tables[@]} -gt 0 ]]; then
log_warning "Dump is missing critical tables: ${missing_tables[*]}"
log_warning "This would result in an incomplete database - aborting dump/restore"
sudo rm -f "$dump_file"
return 1
fi
log_success "Database dumped successfully (${dump_size} bytes)"
log_info "Dump contains all critical tables: ${critical_tables[*]}"
# Create new database from dump
log_info "Creating new database from validated dump..."
if sudo cat "$dump_file" | sudo "$PLEX_SQLITE" "$new_db" 2>/dev/null; then
# Verify the new database was created and has content
if [[ ! -f "$new_db" ]]; then
log_warning "New database file was not created"
sudo rm -f "$dump_file"
return 1
fi
local new_db_size
new_db_size=$(stat -c%s "$new_db" 2>/dev/null || echo "0")
if [[ "$new_db_size" -lt 1048576 ]]; then # Less than 1MB
log_warning "New database is too small ($new_db_size bytes) - likely empty or incomplete"
sudo rm -f "$new_db" "$dump_file"
return 1
fi
# Verify critical tables exist in new database
local table_count
table_count=$(sudo "$PLEX_SQLITE" "$new_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null || echo "0")
# 50-table floor is a heuristic guard against a near-empty rebuild.
if [[ "$table_count" -lt 50 ]]; then # Plex should have way more than 50 tables
log_warning "New database has too few tables ($table_count) - likely incomplete"
sudo rm -f "$new_db" "$dump_file"
return 1
fi
# Verify schema_migrations table specifically (this was the root cause)
if ! sudo "$PLEX_SQLITE" "$new_db" "SELECT COUNT(*) FROM schema_migrations;" >/dev/null 2>&1; then
log_warning "New database missing schema_migrations table - Plex will not start"
sudo rm -f "$new_db" "$dump_file"
return 1
fi
log_success "New database created from dump ($new_db_size bytes, $table_count tables)"
# Verify the new database passes integrity check
log_info "Performing integrity check on repaired database..."
if sudo "$PLEX_SQLITE" "$new_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
log_success "New database passes integrity check"
# Replace original with repaired version
log_info "Replacing original database with repaired version..."
if sudo mv "$new_db" "$original_db"; then
# Force filesystem sync to prevent corruption
sync
sudo chown plex:plex "$original_db"
sudo chmod 644 "$original_db"
sudo rm -f "$dump_file"
log_success "Database successfully repaired and replaced"
return 0
else
log_error "Failed to replace original database with repaired version"
sudo rm -f "$dump_file"
return 1
fi
else
log_warning "Repaired database failed integrity check"
sudo rm -f "$new_db" "$dump_file"
return 1
fi
else
log_warning "Failed to create database from dump - SQL import failed"
sudo rm -f "$dump_file"
return 1
fi
else
log_warning "Failed to dump corrupted database - dump command failed"
# Clean up any potentially created but empty dump file
sudo rm -f "$dump_file"
return 1
fi
}
# Strategy 2: Schema recreation with data recovery
attempt_schema_recreation() {
local working_copy="$1"
local original_db="$2"
local timestamp="$3"
local schema_file="${original_db}.schema-${timestamp}.sql"
local new_db="${original_db}.rebuilt-${timestamp}"
log_message "Attempting repair via schema recreation..."
# Extract schema
if sudo "$PLEX_SQLITE" "$working_copy" ".schema" 2>/dev/null | sudo tee "$schema_file" >/dev/null; then
log_success "Schema extracted"
# Create new database with schema
if sudo cat "$schema_file" | sudo "$PLEX_SQLITE" "$new_db" 2>/dev/null; then
log_success "New database created with schema"
# Try to recover data table by table
if recover_table_data "$working_copy" "$new_db"; then
log_success "Data recovery completed"
# Verify the rebuilt database
if sudo "$PLEX_SQLITE" "$new_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
log_success "Rebuilt database passes integrity check"
if sudo mv "$new_db" "$original_db"; then
# Force filesystem sync to prevent corruption
sync
sudo chown plex:plex "$original_db"
sudo chmod 644 "$original_db"
sudo rm -f "$schema_file"
return 0
fi
else
log_warning "Rebuilt database failed integrity check"
fi
fi
fi
sudo rm -f "$new_db" "$schema_file"
# Use the shared repair script
local repair_command="repair"
if [[ "$force_repair" == "true" ]]; then
repair_command="force-repair"
fi
return 1
}
# Strategy 3: Recovery from previous backup
# Restores the database from the most recent archived backup tarball,
# excluding the current (possibly corrupted) backup when one is supplied.
# Globals:   PLEX_SQLITE (read)
# Arguments: $1 - path of the database to restore
#            $2 - directory containing plex-backup-*.tar.gz archives
#            $3 - optional: current backup archive to exclude from candidates
# Returns:   0 if a verified backup copy was restored in place, 1 otherwise.
attempt_backup_recovery() {
local original_db="$1"
local backup_dir="$2"
local current_backup="$3"
log_message "Attempting recovery from previous backup..."
# Find the most recent backup that's not the current corrupted one
local latest_backup
if [[ -n "$current_backup" ]]; then
# Exclude the current backup from consideration
latest_backup=$(find "$backup_dir" -name "plex-backup-*.tar.gz" -type f ! -samefile "$current_backup" -printf '%T@ %p\n' 2>/dev/null | sort -nr | head -1 | cut -d' ' -f2-)
else
latest_backup=$(find "$backup_dir" -name "plex-backup-*.tar.gz" -type f -printf '%T@ %p\n' 2>/dev/null | sort -nr | head -1 | cut -d' ' -f2-)
fi
if [[ -n "$latest_backup" && -f "$latest_backup" ]]; then
log_message "Found recent backup: $(basename "$latest_backup")"
# NOTE(review): $$-based temp dir name is predictable; mktemp -d would be safer.
local temp_restore_dir="/tmp/plex-restore-$$"
mkdir -p "$temp_restore_dir"
# Extract the backup
if tar -xzf "$latest_backup" -C "$temp_restore_dir" 2>/dev/null; then
local restored_db
# assumes the archive stores the database at its top level — TODO confirm
restored_db="${temp_restore_dir}/$(basename "$original_db")"
if [[ -f "$restored_db" ]]; then
# Verify the restored database
if sudo "$PLEX_SQLITE" "$restored_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
log_success "Backup database passes integrity check"
if sudo cp "$restored_db" "$original_db"; then
# Force filesystem sync to prevent corruption
sync
sudo chown plex:plex "$original_db"
sudo chmod 644 "$original_db"
log_success "Database restored from backup"
rm -rf "$temp_restore_dir"
return 0
fi
else
log_warning "Backup database also corrupted"
fi
fi
fi
rm -rf "$temp_restore_dir"
fi
return 1
}
# Recovery helper for table data
# Copies row data table-by-table from a (possibly damaged) source database
# into a freshly created target database that already has the schema applied.
# Globals:   PLEX_SQLITE (read)
# Arguments: $1 - source database path (corrupted/working copy)
#            $2 - target database path (rebuilt db with schema in place)
# Returns:   0 if at least 80% of tables were recovered, 1 otherwise.
recover_table_data() {
    local source_db="$1"
    local target_db="$2"

    # Enumerate tables in the source database.
    local tables
    tables=$(sudo "$PLEX_SQLITE" "$source_db" ".tables" 2>/dev/null)

    if [[ -z "$tables" ]]; then
        log_warning "No tables found in source database"
        return 1
    fi

    local recovered_count=0
    local total_tables=0
    # Unpredictable temp name (fixes the previous fixed /tmp/table_data_$$.sql,
    # which was vulnerable to symlink/name collisions).
    local dump_tmp
    dump_tmp=$(mktemp /tmp/table_data.XXXXXX) || return 1

    # Intentionally unquoted: $tables is a whitespace-separated list.
    for table in $tables; do
        # Assignment form instead of ((total_tables++)): the latter returns
        # status 1 when the old value is 0 and would trip "set -e".
        total_tables=$((total_tables + 1))
        # Dump the table as INSERT statements to stdout and replay them into
        # the target. Redirecting sqlite's stdout replaces the previous fragile
        # ".output | sudo tee ..." dot-command pipeline and the useless
        # "sudo cat file | sqlite" on the import side.
        if sudo "$PLEX_SQLITE" "$source_db" ".mode insert $table" "SELECT * FROM $table;" 2>/dev/null | sudo tee "$dump_tmp" >/dev/null && \
           sudo "$PLEX_SQLITE" "$target_db" ".read $dump_tmp" 2>/dev/null; then
            recovered_count=$((recovered_count + 1))
        else
            log_warning "Failed to recover data from table: $table"
        fi
    done
    sudo rm -f "$dump_tmp" 2>/dev/null || true

    log_message "Recovered $recovered_count/$total_tables tables"

    # Prevent division by zero when no tables were iterated.
    if [ "$total_tables" -eq 0 ]; then
        log_warning "No tables found for recovery"
        return 1
    fi

    # Consider successful if we recovered at least 80% of tables.
    if (( recovered_count * 100 / total_tables >= 80 )); then
        return 0
    fi
    return 1
}
# Cleanup helper: removes the temporary repair artifacts (pre-repair backup
# and working copy). Empty or missing paths are silently ignored; removal
# failures are swallowed so cleanup never aborts the caller.
cleanup_repair_files() {
local artifact
for artifact in "$1" "$2"; do
if [[ -n "$artifact" && -f "$artifact" ]]; then
sudo rm -f "$artifact" 2>/dev/null || true
fi
done
}
# WAL (Write-Ahead Logging) file handling
handle_wal_files() {
local action="$1" # "backup" or "restore"
local backup_path="$2"
log_info "Handling WAL files: $action"
# Define WAL files that might exist
local wal_files=(
"/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db-wal"
"/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db-shm"
"/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db-wal"
"/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db-shm"
)
for wal_file in "${wal_files[@]}"; do
local wal_basename
wal_basename=$(basename "$wal_file")
case "$action" in
"backup")
if [ -f "$wal_file" ]; then
log_info "Found WAL/SHM file: $wal_basename"
local backup_file="${backup_path}/${wal_basename}"
if sudo cp "$wal_file" "$backup_file"; then
# Force filesystem sync to prevent corruption
sync
log_success "Backed up WAL/SHM file: $wal_basename"
# Verify backup
if verify_backup "$wal_file" "$backup_file"; then
log_success "Verified WAL/SHM backup: $wal_basename"
else
log_warning "WAL/SHM backup verification failed: $wal_basename"
fi
else
log_warning "Failed to backup WAL/SHM file: $wal_basename"
fi
else
log_info "WAL/SHM file not found (normal): $wal_basename"
fi
;;
"checkpoint")
# Force WAL checkpoint to integrate changes into main database
local db_file="${wal_file%.db-*}.db"
if [ -f "$db_file" ] && [ -f "$wal_file" ]; then
log_info "Performing WAL checkpoint for: $(basename "$db_file")"
if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA wal_checkpoint(FULL);" 2>/dev/null; then
log_success "WAL checkpoint completed for: $(basename "$db_file")"
else
log_warning "WAL checkpoint failed for: $(basename "$db_file")"
fi
fi
;;
esac
done
}
# Enhanced WAL file management for repair operations
# Manages the -wal/-shm sidecar files of one database around a repair attempt.
# Globals:   PLEX_SQLITE (read)
# Arguments: $1 - database file the sidecars belong to
#            $2 - phase: "prepare" (checkpoint + back up sidecars),
#                 "cleanup" (drop sidecars, re-enable WAL mode), or
#                 "restore" (put sidecar backups back after a failed repair)
# Returns:   0 (individual failures are logged, not propagated).
handle_wal_files_for_repair() {
local db_file="$1"
local operation="${2:-prepare}" # prepare, cleanup, or restore
local db_dir
db_dir=$(dirname "$db_file")
local db_base
db_base=$(basename "$db_file" .db)
local wal_file="${db_dir}/${db_base}.db-wal"
local shm_file="${db_dir}/${db_base}.db-shm"
case "$operation" in
"prepare")
log_message "Preparing WAL files for repair of $(basename "$db_file")"
# Force WAL checkpoint to consolidate all changes
if [ -f "$wal_file" ]; then
log_info "Found WAL file, performing checkpoint..."
if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA wal_checkpoint(TRUNCATE);" 2>/dev/null; then
log_success "WAL checkpoint completed"
else
log_warning "WAL checkpoint failed, continuing with repair"
fi
fi
# Create backup copies of WAL/SHM files if they exist
for file in "$wal_file" "$shm_file"; do
if [ -f "$file" ]; then
local backup_file="${file}.repair-backup"
if sudo cp "$file" "$backup_file" 2>/dev/null; then
# Force filesystem sync to prevent corruption
sync
log_info "Backed up $(basename "$file") for repair"
fi
fi
done
;;
"cleanup")
log_message "Cleaning up WAL files after repair"
# Remove any remaining WAL/SHM files to force clean state
# (safe only because "prepare" checkpointed pending WAL data first).
for file in "$wal_file" "$shm_file"; do
if [ -f "$file" ]; then
if sudo rm -f "$file" 2>/dev/null; then
log_info "Removed $(basename "$file") for clean state"
fi
fi
done
# Force WAL mode back on for consistency
if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA journal_mode=WAL;" 2>/dev/null | grep -q "wal"; then
log_success "WAL mode restored for $(basename "$db_file")"
else
log_warning "Failed to restore WAL mode for $(basename "$db_file")"
fi
;;
"restore")
log_message "Restoring WAL files after failed repair"
# Restore WAL/SHM backup files if they exist
for file in "$wal_file" "$shm_file"; do
local backup_file="${file}.repair-backup"
if [ -f "$backup_file" ]; then
if sudo mv "$backup_file" "$file" 2>/dev/null; then
log_info "Restored $(basename "$file") from backup"
else
log_warning "Failed to restore $(basename "$file") from backup"
# Try to remove broken backup file
sudo rm -f "$backup_file" 2>/dev/null || true
fi
else
log_info "No backup found for $(basename "$file")"
fi
done
;;
esac
}
# Enhanced database integrity check with WAL handling
check_database_integrity_with_wal() {
local db_file="$1"
local db_name
db_name=$(basename "$db_file")
log_message "Checking database integrity with WAL handling: $db_name"
# Check if Plex SQLite exists
if [ ! -f "$PLEX_SQLITE" ]; then
log_error "Plex SQLite binary not found at: $PLEX_SQLITE"
return 1
fi
# Make Plex SQLite executable if it isn't already
sudo chmod +x "$PLEX_SQLITE" 2>/dev/null || true
# Check if WAL file exists and handle it
local wal_file="${db_file}-wal"
if [ -f "$wal_file" ]; then
log_info "WAL file found for $db_name, performing checkpoint..."
if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA wal_checkpoint(FULL);" 2>/dev/null; then
log_success "WAL checkpoint completed for $db_name"
else
log_warning "WAL checkpoint failed for $db_name, proceeding with integrity check"
fi
fi
# Run integrity check
local integrity_result
integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1)
local check_exit_code=$?
if [ $check_exit_code -ne 0 ]; then
log_error "Failed to run integrity check on $db_name: $integrity_result"
return 1
fi
if echo "$integrity_result" | grep -q "^ok$"; then
log_success "Database integrity check passed: $db_name"
log_message "Using shared repair script for database repair..."
if "$repair_script" "$repair_command" "$db_file"; then
log_success "Database repaired successfully using shared repair script"
return 0
else
log_warning "Database integrity issues detected in $db_name:"
echo "$integrity_result" | while read -r line; do
log_warning " $line"
done
return 1
local exit_code=$?
if [[ $exit_code -eq 2 ]]; then
log_error "Critical error during database repair"
return 2
else
log_error "Database repair failed"
log_warning "Will backup corrupted database - manual intervention may be needed"
return 1
fi
fi
}
# Parallel verification function
verify_files_parallel() {
local backup_dir="$1"
@@ -1603,7 +1223,7 @@ check_integrity_only() {
databases_checked=$((databases_checked + 1))
log_message "Checking integrity of $(basename "$file")..."
if ! check_database_integrity_with_wal "$file"; then
if ! check_database_integrity "$file"; then
db_integrity_issues=$((db_integrity_issues + 1))
log_warning "Database integrity issues found in $(basename "$file")"
@@ -1673,6 +1293,9 @@ main() {
# Create necessary directories
mkdir -p "${BACKUP_ROOT}"
mkdir -p "${LOCAL_LOG_ROOT}"
# Ensure acedanger owns the log directories
sudo chown -R acedanger:acedanger "${LOCAL_LOG_ROOT}" 2>/dev/null || true
# Initialize logs
initialize_logs
@@ -1718,7 +1341,7 @@ main() {
# Only check database files
if [[ "$file" == *".db" ]] && [ -f "$file" ]; then
if ! check_database_integrity_with_wal "$file"; then
if ! check_database_integrity "$file"; then
db_integrity_issues=$((db_integrity_issues + 1))
log_warning "Database integrity issues found in $(basename "$file")"
@@ -1752,7 +1375,7 @@ main() {
log_success "Database repair successful for $(basename "$file")"
# Re-verify integrity after repair
if check_database_integrity_with_wal "$file"; then
if check_database_integrity "$file"; then
log_success "Post-repair integrity verification passed for $(basename "$file")"
# Decrement issue count since repair was successful
db_integrity_issues=$((db_integrity_issues - 1))