fix: Clean up unnecessary whitespace and improve code readability in backup-plex.sh

Peter Wood
2025-06-12 17:18:57 -04:00
parent 2b979f0950
commit 7d45acf6c6

backup-plex.sh

@@ -97,11 +97,7 @@ while [[ $# -gt 0 ]]; do
            shift
            ;;
        --disable-auto-repair)
            AUTO_REPAIR=false
-           shift
-           ;;
-       --check-integrity)
-           INTEGRITY_CHECK_ONLY=true
            shift
            ;;
        --non-interactive)
@@ -550,12 +546,12 @@ detect_early_corruption() {
    local db_file="$1"
    local db_name
    db_name=$(basename "$db_file")
    log_message "Performing early corruption detection for: $db_name"
    # Check for early warning signs of corruption
    local warning_count=0
    # 1. Check for WAL file size anomalies
    local wal_file="${db_file}-wal"
    if [ -f "$wal_file" ]; then
@@ -563,7 +559,7 @@ detect_early_corruption() {
        wal_size=$(stat -f%z "$wal_file" 2>/dev/null || stat -c%s "$wal_file" 2>/dev/null || echo "0")
        local db_size
        db_size=$(stat -f%z "$db_file" 2>/dev/null || stat -c%s "$db_file" 2>/dev/null || echo "0")
        # If WAL file is more than 10% of database size, it might indicate issues
        if [ "$wal_size" -gt 0 ] && [ "$db_size" -gt 0 ]; then
            local wal_ratio=$((wal_size * 100 / db_size))
@@ -575,7 +571,7 @@ detect_early_corruption() {
            log_info "Unable to determine file sizes for WAL analysis"
        fi
    fi
    # 2. Quick integrity check focused on critical issues
    local quick_check
    if ! quick_check=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA quick_check(5);" 2>&1); then
@@ -586,7 +582,7 @@ detect_early_corruption() {
        log_warning "Issues found: $quick_check"
        ((warning_count++))
    fi
    # 3. Check for foreign key violations (common early corruption sign)
    local fk_check
    if fk_check=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA foreign_key_check;" 2>/dev/null); then
@@ -597,13 +593,13 @@ detect_early_corruption() {
    else
        log_info "Foreign key check unavailable for $db_name"
    fi
    # 4. Check database statistics for anomalies
    if ! sudo "$PLEX_SQLITE" "$db_file" "PRAGMA compile_options;" >/dev/null 2>&1; then
        log_warning "Database statistics check failed for $db_name"
        ((warning_count++))
    fi
    if [ "$warning_count" -gt 0 ]; then
        log_warning "Early corruption indicators detected ($warning_count warnings) in $db_name"
        log_warning "Consider performing preventive maintenance or monitoring more closely"
@@ -621,17 +617,17 @@ repair_database() {
    db_name=$(basename "$db_file")
    local timestamp
    timestamp=$(date "+%Y-%m-%d_%H.%M.%S")
    log_message "Attempting to repair corrupted database: $db_name"
    log_message "Starting advanced database repair for: $db_name"
    # Enhanced WAL file handling for repair
    handle_wal_files_for_repair "$db_file" "prepare"
    # Create multiple backup copies before attempting repair
    local pre_repair_backup="${db_file}.pre-repair-backup"
    local working_copy="${db_file}.working-${timestamp}"
    if ! sudo cp "$db_file" "$pre_repair_backup"; then
        log_error "Failed to create pre-repair backup"
        handle_wal_files_for_repair "$db_file" "restore"
@@ -647,9 +643,9 @@ repair_database() {
fi fi
# Force filesystem sync to prevent corruption # Force filesystem sync to prevent corruption
sync sync
log_success "Created pre-repair backup: $(basename "$pre_repair_backup")" log_success "Created pre-repair backup: $(basename "$pre_repair_backup")"
# Strategy 1: Try dump and restore approach # Strategy 1: Try dump and restore approach
log_message "Step 1: Database cleanup and optimization..." log_message "Step 1: Database cleanup and optimization..."
if attempt_dump_restore "$working_copy" "$db_file" "$timestamp"; then if attempt_dump_restore "$working_copy" "$db_file" "$timestamp"; then
@@ -658,7 +654,7 @@ repair_database() {
cleanup_repair_files "$pre_repair_backup" "$working_copy" cleanup_repair_files "$pre_repair_backup" "$working_copy"
return 0 return 0
fi fi
# Strategy 2: Try schema recreation # Strategy 2: Try schema recreation
if attempt_schema_recreation "$working_copy" "$db_file" "$timestamp"; then if attempt_schema_recreation "$working_copy" "$db_file" "$timestamp"; then
log_success "Database repaired using schema recreation" log_success "Database repaired using schema recreation"
@@ -666,7 +662,7 @@ repair_database() {
cleanup_repair_files "$pre_repair_backup" "$working_copy" cleanup_repair_files "$pre_repair_backup" "$working_copy"
return 0 return 0
fi fi
# Strategy 3: Try recovery from previous backup # Strategy 3: Try recovery from previous backup
if attempt_backup_recovery "$db_file" "$BACKUP_ROOT" "$pre_repair_backup"; then if attempt_backup_recovery "$db_file" "$BACKUP_ROOT" "$pre_repair_backup"; then
log_success "Database recovered from previous backup" log_success "Database recovered from previous backup"
@@ -674,7 +670,7 @@ repair_database() {
cleanup_repair_files "$pre_repair_backup" "$working_copy" cleanup_repair_files "$pre_repair_backup" "$working_copy"
return 0 return 0
fi fi
# All strategies failed - restore original and flag for manual intervention # All strategies failed - restore original and flag for manual intervention
log_error "Database repair failed. Restoring original..." log_error "Database repair failed. Restoring original..."
if sudo cp "$pre_repair_backup" "$db_file"; then if sudo cp "$pre_repair_backup" "$db_file"; then
@@ -687,7 +683,7 @@ repair_database() {
handle_wal_files_for_repair "$db_file" "restore" handle_wal_files_for_repair "$db_file" "restore"
return 2 return 2
fi fi
log_error "Database repair failed for $db_name" log_error "Database repair failed for $db_name"
log_warning "Will backup corrupted database - manual intervention may be needed" log_warning "Will backup corrupted database - manual intervention may be needed"
cleanup_repair_files "$pre_repair_backup" "$working_copy" cleanup_repair_files "$pre_repair_backup" "$working_copy"
@@ -701,9 +697,9 @@ attempt_dump_restore() {
    local timestamp="$3"
    local dump_file="${original_db}.dump-${timestamp}.sql"
    local new_db="${original_db}.repaired-${timestamp}"
    log_message "Attempting repair via SQL dump/restore..."
    # Try to dump the database with error checking
    log_info "Creating database dump..."
    if sudo "$PLEX_SQLITE" "$working_copy" ".dump" 2>/dev/null | sudo tee "$dump_file" >/dev/null; then
@@ -712,7 +708,7 @@ attempt_dump_restore() {
        log_warning "Dump file was not created"
        return 1
    fi
    local dump_size
    dump_size=$(stat -c%s "$dump_file" 2>/dev/null || echo "0")
    if [[ "$dump_size" -lt 1024 ]]; then
@@ -720,14 +716,14 @@ attempt_dump_restore() {
        sudo rm -f "$dump_file"
        return 1
    fi
    # Check for essential database structures in dump
    if ! grep -q "CREATE TABLE" "$dump_file" 2>/dev/null; then
        log_warning "Dump file contains no CREATE TABLE statements - dump is incomplete"
        sudo rm -f "$dump_file"
        return 1
    fi
    # Check for critical Plex tables
    local critical_tables=("schema_migrations" "accounts" "library_sections")
    local missing_tables=()
@@ -736,17 +732,17 @@ attempt_dump_restore() {
            missing_tables+=("$table")
        fi
    done
    if [[ ${#missing_tables[@]} -gt 0 ]]; then
        log_warning "Dump is missing critical tables: ${missing_tables[*]}"
        log_warning "This would result in an incomplete database - aborting dump/restore"
        sudo rm -f "$dump_file"
        return 1
    fi
    log_success "Database dumped successfully (${dump_size} bytes)"
    log_info "Dump contains all critical tables: ${critical_tables[*]}"
    # Create new database from dump
    log_info "Creating new database from validated dump..."
    if sudo cat "$dump_file" | sudo "$PLEX_SQLITE" "$new_db" 2>/dev/null; then
@@ -756,7 +752,7 @@ attempt_dump_restore() {
        sudo rm -f "$dump_file"
        return 1
    fi
    local new_db_size
    new_db_size=$(stat -c%s "$new_db" 2>/dev/null || echo "0")
    if [[ "$new_db_size" -lt 1048576 ]]; then # Less than 1MB
@@ -764,7 +760,7 @@ attempt_dump_restore() {
        sudo rm -f "$new_db" "$dump_file"
        return 1
    fi
    # Verify critical tables exist in new database
    local table_count
    table_count=$(sudo "$PLEX_SQLITE" "$new_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null || echo "0")
@@ -773,21 +769,21 @@ attempt_dump_restore() {
        sudo rm -f "$new_db" "$dump_file"
        return 1
    fi
    # Verify schema_migrations table specifically (this was the root cause)
    if ! sudo "$PLEX_SQLITE" "$new_db" "SELECT COUNT(*) FROM schema_migrations;" >/dev/null 2>&1; then
        log_warning "New database missing schema_migrations table - Plex will not start"
        sudo rm -f "$new_db" "$dump_file"
        return 1
    fi
    log_success "New database created from dump ($new_db_size bytes, $table_count tables)"
    # Verify the new database passes integrity check
    log_info "Performing integrity check on repaired database..."
    if sudo "$PLEX_SQLITE" "$new_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
        log_success "New database passes integrity check"
        # Replace original with repaired version
        log_info "Replacing original database with repaired version..."
        if sudo mv "$new_db" "$original_db"; then
@@ -828,25 +824,25 @@ attempt_schema_recreation() {
    local timestamp="$3"
    local schema_file="${original_db}.schema-${timestamp}.sql"
    local new_db="${original_db}.rebuilt-${timestamp}"
    log_message "Attempting repair via schema recreation..."
    # Extract schema
    if sudo "$PLEX_SQLITE" "$working_copy" ".schema" 2>/dev/null | sudo tee "$schema_file" >/dev/null; then
        log_success "Schema extracted"
        # Create new database with schema
        if sudo cat "$schema_file" | sudo "$PLEX_SQLITE" "$new_db" 2>/dev/null; then
            log_success "New database created with schema"
            # Try to recover data table by table
            if recover_table_data "$working_copy" "$new_db"; then
                log_success "Data recovery completed"
                # Verify the rebuilt database
                if sudo "$PLEX_SQLITE" "$new_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
                    log_success "Rebuilt database passes integrity check"
                    if sudo mv "$new_db" "$original_db"; then
                        # Force filesystem sync to prevent corruption
                        sync
@@ -860,10 +856,10 @@ attempt_schema_recreation() {
                fi
            fi
        fi
        sudo rm -f "$new_db" "$schema_file"
    fi
    return 1
}
@@ -872,9 +868,9 @@ attempt_backup_recovery() {
    local original_db="$1"
    local backup_dir="$2"
    local current_backup="$3"
    log_message "Attempting recovery from previous backup..."
    # Find the most recent backup that's not the current corrupted one
    local latest_backup
    if [[ -n "$current_backup" ]]; then
@@ -883,23 +879,23 @@ attempt_backup_recovery() {
    else
        latest_backup=$(find "$backup_dir" -name "plex-backup-*.tar.gz" -type f -printf '%T@ %p\n' 2>/dev/null | sort -nr | head -1 | cut -d' ' -f2-)
    fi
    if [[ -n "$latest_backup" && -f "$latest_backup" ]]; then
        log_message "Found recent backup: $(basename "$latest_backup")"
        local temp_restore_dir="/tmp/plex-restore-$$"
        mkdir -p "$temp_restore_dir"
        # Extract the backup
        if tar -xzf "$latest_backup" -C "$temp_restore_dir" 2>/dev/null; then
            local restored_db
            restored_db="${temp_restore_dir}/$(basename "$original_db")"
            if [[ -f "$restored_db" ]]; then
                # Verify the restored database
                if sudo "$PLEX_SQLITE" "$restored_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
                    log_success "Backup database passes integrity check"
                    if sudo cp "$restored_db" "$original_db"; then
                        # Force filesystem sync to prevent corruption
                        sync
@@ -914,10 +910,10 @@ attempt_backup_recovery() {
                fi
            fi
        fi
        rm -rf "$temp_restore_dir"
    fi
    return 1
}
@@ -925,22 +921,22 @@ attempt_backup_recovery() {
recover_table_data() {
    local source_db="$1"
    local target_db="$2"
    # Get list of tables
    local tables
    tables=$(sudo "$PLEX_SQLITE" "$source_db" ".tables" 2>/dev/null)
    if [[ -z "$tables" ]]; then
        log_warning "No tables found in source database"
        return 1
    fi
    local recovered_count=0
    local total_tables=0
    for table in $tables; do
        ((total_tables++))
        # Try to copy data from each table
        if sudo "$PLEX_SQLITE" "$source_db" ".mode insert $table" ".output | sudo tee /tmp/table_data_$$.sql > /dev/null" "SELECT * FROM $table;" ".output stdout" 2>/dev/null && \
            sudo cat "/tmp/table_data_$$.sql" | sudo "$PLEX_SQLITE" "$target_db" 2>/dev/null; then
@@ -951,20 +947,20 @@ recover_table_data() {
            sudo rm -f "/tmp/table_data_$$.sql" 2>/dev/null || true
        fi
    done
    log_message "Recovered $recovered_count/$total_tables tables"
    # Consider successful if we recovered at least 80% of tables
    # Prevent division by zero
    if [ "$total_tables" -eq 0 ]; then
        log_warning "No tables found for recovery"
        return 1
    fi
    if (( recovered_count * 100 / total_tables >= 80 )); then
        return 0
    fi
    return 1
}
@@ -972,11 +968,11 @@ recover_table_data() {
cleanup_repair_files() {
    local pre_repair_backup="$1"
    local working_copy="$2"
    if [[ -n "$pre_repair_backup" && -f "$pre_repair_backup" ]]; then
        sudo rm -f "$pre_repair_backup" 2>/dev/null || true
    fi
    if [[ -n "$working_copy" && -f "$working_copy" ]]; then
        sudo rm -f "$working_copy" 2>/dev/null || true
    fi
@@ -1045,18 +1041,18 @@ handle_wal_files() {
handle_wal_files_for_repair() {
    local db_file="$1"
    local operation="${2:-prepare}" # prepare, cleanup, or restore
    local db_dir
    db_dir=$(dirname "$db_file")
    local db_base
    db_base=$(basename "$db_file" .db)
    local wal_file="${db_dir}/${db_base}.db-wal"
    local shm_file="${db_dir}/${db_base}.db-shm"
    case "$operation" in
        "prepare")
            log_message "Preparing WAL files for repair of $(basename "$db_file")"
            # Force WAL checkpoint to consolidate all changes
            if [ -f "$wal_file" ]; then
                log_info "Found WAL file, performing checkpoint..."
@@ -1066,7 +1062,7 @@ handle_wal_files_for_repair() {
                    log_warning "WAL checkpoint failed, continuing with repair"
                fi
            fi
            # Create backup copies of WAL/SHM files if they exist
            for file in "$wal_file" "$shm_file"; do
                if [ -f "$file" ]; then
@@ -1079,10 +1075,10 @@ handle_wal_files_for_repair() {
                fi
            done
            ;;
        "cleanup")
            log_message "Cleaning up WAL files after repair"
            # Remove any remaining WAL/SHM files to force clean state
            for file in "$wal_file" "$shm_file"; do
                if [ -f "$file" ]; then
@@ -1091,7 +1087,7 @@ handle_wal_files_for_repair() {
                    fi
                fi
            done
            # Force WAL mode back on for consistency
            if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA journal_mode=WAL;" 2>/dev/null | grep -q "wal"; then
                log_success "WAL mode restored for $(basename "$db_file")"
@@ -1099,10 +1095,10 @@ handle_wal_files_for_repair() {
                log_warning "Failed to restore WAL mode for $(basename "$db_file")"
            fi
            ;;
        "restore")
            log_message "Restoring WAL files after failed repair"
            # Restore WAL/SHM backup files if they exist
            for file in "$wal_file" "$shm_file"; do
                local backup_file="${file}.repair-backup"
@@ -1310,7 +1306,7 @@ verify_backup() {
        log_error "Backup verification failed after $max_retries attempts: $(basename "$src")"
        log_error "Source checksum: $src_checksum"
        log_error "Backup checksum: $dest_checksum"
        # For database files, perform additional integrity check on backup
        if [[ "$dest" == *.db ]]; then
            log_warning "Database file checksum mismatch - checking backup integrity..."
@@ -1375,17 +1371,17 @@ manage_plex_service() {
    log_warning "Normal stop failed, attempting extended graceful shutdown..."
    local plex_pids
    plex_pids=$(pgrep -f "Plex Media Server" 2>/dev/null || true)
    if [ -n "$plex_pids" ]; then
        log_message "Found Plex processes: $plex_pids"
        log_message "Sending graceful termination signal and waiting longer..."
        # Send TERM signal for graceful shutdown
        if sudo pkill -TERM -f "Plex Media Server" 2>/dev/null || true; then
            # Extended wait for graceful shutdown (up to 60 seconds)
            local extended_wait=0
            local max_extended_wait=60
            while [ $extended_wait -lt $max_extended_wait ]; do
                plex_pids=$(pgrep -f "Plex Media Server" 2>/dev/null || true)
                if [ -z "$plex_pids" ]; then
@@ -1398,7 +1394,7 @@ manage_plex_service() {
                echo -n "."
            done
            echo
            # If still running after extended wait, log error but don't force kill
            plex_pids=$(pgrep -f "Plex Media Server" 2>/dev/null || true)
            if [ -n "$plex_pids" ]; then