#!/bin/bash ################################################################################ # Plex Media Server Enhanced Backup Script ################################################################################ # # Author: Peter Wood # Description: Comprehensive backup solution for Plex Media Server with advanced # database integrity checking, automated repair capabilities, # performance monitoring, and multi-channel notifications. # # Features: # - Database integrity verification with automatic repair # - WAL (Write-Ahead Logging) file handling # - Performance monitoring with JSON logging # - Parallel verification for improved speed # - Multi-channel notifications (webhook, email, console) # - Comprehensive error handling and recovery # - Automated cleanup of old backups # # Related Scripts: # - restore-plex.sh: Restore from backups created by this script # - validate-plex-backups.sh: Validate backup integrity and health # - monitor-plex-backup.sh: Real-time monitoring dashboard # - test-plex-backup.sh: Comprehensive testing suite # - plex.sh: General Plex service management # # Usage: # ./backup-plex.sh # Standard backup with auto-repair # ./backup-plex.sh --disable-auto-repair # Backup without auto-repair # ./backup-plex.sh --check-integrity # Integrity check only # ./backup-plex.sh --non-interactive # Automated mode for cron jobs # # Dependencies: # - Plex Media Server # - sqlite3 or Plex SQLite binary # - curl (for webhook notifications) # - jq (for JSON processing) # - sendmail (optional, for email notifications) # # Exit Codes: # 0 - Success # 1 - General error # 2 - Database integrity issues # 3 - Service management failure # 4 - Backup creation failure # ################################################################################ set -e # Color codes for output RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' BLUE='\033[0;34m' CYAN='\033[0;36m' NC='\033[0m' # No Color # Performance tracking variables (removed unused variables) # Configuration MAX_BACKUP_AGE_DAYS=30 MAX_BACKUPS_TO_KEEP=10 BACKUP_ROOT="/mnt/share/media/backups/plex" SHARED_LOG_ROOT="/mnt/share/media/backups/logs" SCRIPT_DIR="$(dirname "$(readlink -f "$0")")" LOCAL_LOG_ROOT="${SCRIPT_DIR}/logs" PERFORMANCE_LOG_FILE="${LOCAL_LOG_ROOT}/plex-backup-performance.json" # Backup strategy configuration - Always perform full backups # Plex SQLite path (custom Plex SQLite binary) PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite" # Script options AUTO_REPAIR=true # Default to enabled for automatic corruption detection and repair INTEGRITY_CHECK_ONLY=false INTERACTIVE_MODE=false PARALLEL_VERIFICATION=true PERFORMANCE_MONITORING=true WEBHOOK_URL="https://notify.peterwood.rocks/lab" EMAIL_RECIPIENT="" # Parse command line arguments while [[ $# -gt 0 ]]; do case $1 in --auto-repair) AUTO_REPAIR=true INTERACTIVE_MODE=false shift ;; --disable-auto-repair) AUTO_REPAIR=false shift ;; --check-integrity) INTEGRITY_CHECK_ONLY=true shift ;; --non-interactive) INTERACTIVE_MODE=false shift ;; --interactive) INTERACTIVE_MODE=true shift ;; --no-parallel) PARALLEL_VERIFICATION=false shift ;; --no-performance) PERFORMANCE_MONITORING=false shift ;; --webhook=*) WEBHOOK_URL="${1#*=}" shift ;; --email=*) EMAIL_RECIPIENT="${1#*=}" shift ;; -h|--help) echo "Usage: $0 [OPTIONS]" echo "Options:" echo " --auto-repair Force enable automatic database repair (default: enabled)" echo " --disable-auto-repair Disable automatic database repair" echo " --check-integrity Only check database integrity, don't backup" echo " --non-interactive Run in non-interactive mode (for automation)" echo " --interactive Run in interactive mode (prompts for repair decisions)" echo " --no-parallel Disable parallel verification (slower but safer)" echo " --no-performance Disable performance monitoring" echo " --webhook=URL Send notifications to webhook URL" echo " --email=ADDRESS Send notifications to email address" echo " -h, --help Show this help message" echo "" echo "Database Integrity & Repair:" echo " By default, the script automatically detects and attempts to repair" echo " corrupted databases before backup. Use --disable-auto-repair to" echo " skip repair and backup corrupted databases as-is." echo "" exit 0 ;; *) echo "Unknown option: $1" exit 1 ;; esac done # Create logs directory mkdir -p "${SCRIPT_DIR}/logs" # Define Plex files and their nicknames declare -A PLEX_FILES=( ["database"]="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db" ["blobs"]="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db" ["preferences"]="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Preferences.xml" ) # Logging functions log_message() { local message="$1" local timestamp timestamp=$(date '+%Y-%m-%d %H:%M:%S') echo -e "${CYAN}[${timestamp}]${NC} ${message}" mkdir -p "$LOCAL_LOG_ROOT" echo "[${timestamp}] $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true } log_error() { local message="$1" local timestamp timestamp=$(date '+%Y-%m-%d %H:%M:%S') echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}" mkdir -p "$LOCAL_LOG_ROOT" echo "[${timestamp}] ERROR: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true } log_success() { local message="$1" local timestamp timestamp=$(date '+%Y-%m-%d %H:%M:%S') echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}" mkdir -p "$LOCAL_LOG_ROOT" echo "[${timestamp}] SUCCESS: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true } log_warning() { local message="$1" local timestamp timestamp=$(date '+%Y-%m-%d %H:%M:%S') echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}" mkdir -p "$LOCAL_LOG_ROOT" echo "[${timestamp}] WARNING: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true } log_info() { local message="$1" local timestamp timestamp=$(date '+%Y-%m-%d %H:%M:%S') echo -e "${BLUE}[${timestamp}] INFO:${NC} ${message}" mkdir -p "$LOCAL_LOG_ROOT" echo "[${timestamp}] INFO: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true } # Performance tracking functions track_performance() { if [ "$PERFORMANCE_MONITORING" != true ]; then return 0 fi local operation="$1" local start_time="$2" local end_time="${3:-$(date +%s)}" local duration=$((end_time - start_time)) # Initialize performance log if it doesn't exist if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then mkdir -p "$(dirname "$PERFORMANCE_LOG_FILE")" echo "[]" > "$PERFORMANCE_LOG_FILE" fi # Add performance entry local entry entry=$(jq -n \ --arg operation "$operation" \ --arg duration "$duration" \ --arg timestamp "$(date -Iseconds)" \ '{ operation: $operation, duration_seconds: ($duration | tonumber), timestamp: $timestamp }') jq --argjson entry "$entry" '. += [$entry]' "$PERFORMANCE_LOG_FILE" > "${PERFORMANCE_LOG_FILE}.tmp" && \ mv "${PERFORMANCE_LOG_FILE}.tmp" "$PERFORMANCE_LOG_FILE" log_info "Performance: $operation completed in ${duration}s" } # Initialize log directory initialize_logs() { mkdir -p "$(dirname "$PERFORMANCE_LOG_FILE")" if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then echo "[]" > "$PERFORMANCE_LOG_FILE" log_message "Initialized performance log file" fi } # Log synchronization functions sync_logs_to_shared() { local sync_start_time sync_start_time=$(date +%s) log_info "Starting log synchronization to shared location" # Ensure shared log directory exists if ! mkdir -p "$SHARED_LOG_ROOT" 2>/dev/null; then log_warning "Could not create shared log directory: $SHARED_LOG_ROOT" return 1 fi # Check if shared location is accessible if [ ! -w "$SHARED_LOG_ROOT" ]; then log_warning "Shared log directory is not writable: $SHARED_LOG_ROOT" return 1 fi # Sync log files (one-way: local -> shared) local sync_count=0 local error_count=0 for log_file in "$LOCAL_LOG_ROOT"/*.log "$LOCAL_LOG_ROOT"/*.json; do if [ -f "$log_file" ]; then local filename filename=$(basename "$log_file") local shared_file="$SHARED_LOG_ROOT/$filename" # Only copy if file doesn't exist in shared location or local is newer if [ ! -f "$shared_file" ] || [ "$log_file" -nt "$shared_file" ]; then if cp "$log_file" "$shared_file" 2>/dev/null; then ((sync_count++)) log_info "Synced: $filename" else ((error_count++)) log_warning "Failed to sync: $filename" fi fi fi done local sync_end_time sync_end_time=$(date +%s) local sync_duration=$((sync_end_time - sync_start_time)) if [ $error_count -eq 0 ]; then log_success "Log sync completed: $sync_count files synced in ${sync_duration}s" else log_warning "Log sync completed with errors: $sync_count synced, $error_count failed in ${sync_duration}s" fi return $error_count } # Cleanup old local logs (30 day retention) cleanup_old_local_logs() { local cleanup_start_time cleanup_start_time=$(date +%s) log_info "Starting cleanup of old local logs (30+ days)" if [ ! -d "$LOCAL_LOG_ROOT" ]; then log_info "Local log directory does not exist, nothing to clean up" return 0 fi local cleanup_count=0 local error_count=0 # Find and remove log files older than 30 days while IFS= read -r -d '' old_file; do local filename filename=$(basename "$old_file") if rm "$old_file" 2>/dev/null; then ((cleanup_count++)) log_info "Removed old log: $filename" else ((error_count++)) log_warning "Failed to remove old log: $filename" fi done < <(find "$LOCAL_LOG_ROOT" -name "*.log" -mtime +30 -print0 2>/dev/null) # Also clean up old performance log entries (keep structure, remove old entries) if [ -f "$PERFORMANCE_LOG_FILE" ]; then local thirty_days_ago thirty_days_ago=$(date -d '30 days ago' -Iseconds) local temp_perf_file="${PERFORMANCE_LOG_FILE}.cleanup.tmp" if jq --arg cutoff "$thirty_days_ago" '[.[] | select(.timestamp >= $cutoff)]' "$PERFORMANCE_LOG_FILE" > "$temp_perf_file" 2>/dev/null; then local old_count old_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0") local new_count new_count=$(jq length "$temp_perf_file" 2>/dev/null || echo "0") local removed_count=$((old_count - new_count)) if [ "$removed_count" -gt 0 ]; then mv "$temp_perf_file" "$PERFORMANCE_LOG_FILE" log_info "Cleaned up $removed_count old performance entries" ((cleanup_count += removed_count)) else rm -f "$temp_perf_file" fi else rm -f "$temp_perf_file" log_warning "Failed to clean up old performance log entries" ((error_count++)) fi fi local cleanup_end_time cleanup_end_time=$(date +%s) local cleanup_duration=$((cleanup_end_time - cleanup_start_time)) if [ $cleanup_count -gt 0 ]; then log_success "Cleanup completed: $cleanup_count items removed in ${cleanup_duration}s" else log_info "Cleanup completed: no old items found to remove in ${cleanup_duration}s" fi return $error_count } # Enhanced notification system send_notification() { local title="$1" local message="$2" local status="${3:-info}" # success, error, warning, info local hostname hostname=$(hostname) # Console notification case "$status" in success) log_success "$title: $message" ;; error) log_error "$title: $message" ;; warning) log_warning "$title: $message" ;; *) log_info "$title: $message" ;; esac # Webhook notification if [ -n "$WEBHOOK_URL" ]; then local tags="backup,plex,${hostname}" [ "$status" == "error" ] && tags="${tags},errors" [ "$status" == "warning" ] && tags="${tags},warnings" # Clean message without newlines or timestamps for webhook local webhook_message="$message" curl -s \ -H "tags:${tags}" \ -d "$webhook_message" \ "$WEBHOOK_URL" 2>/dev/null || log_warning "Failed to send webhook notification" fi # Email notification (if sendmail is available) if [ -n "$EMAIL_RECIPIENT" ] && command -v sendmail > /dev/null 2>&1; then { echo "To: $EMAIL_RECIPIENT" echo "Subject: Plex Backup - $title" echo "Content-Type: text/plain" echo "" echo "Host: $hostname" echo "Time: $(date)" echo "Status: $status" echo "" echo "$message" } | sendmail "$EMAIL_RECIPIENT" 2>/dev/null || true fi } # Format backed up files list for notifications format_backed_up_files() { local files=("$@") local count=${#files[@]} if [ "$count" -eq 0 ]; then echo "no files" elif [ "$count" -eq 1 ]; then echo "${files[0]}" elif [ "$count" -eq 2 ]; then echo "${files[0]} and ${files[1]}" else local last_file="${files[-1]}" local other_files=("${files[@]:0:$((count-1))}") local other_files_str other_files_str=$(IFS=', '; echo "${other_files[*]}") echo "${other_files_str}, and ${last_file}" fi } # Enhanced checksum calculation with caching calculate_checksum() { local file="$1" # Use /tmp for cache files to avoid permission issues local cache_dir="/tmp/plex-backup-cache" local cache_file="$cache_dir/${file//\//_}.md5" local file_mtime file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0") # Create cache directory if it doesn't exist mkdir -p "$cache_dir" 2>/dev/null || true # Check if cached checksum exists and is newer than file if [ -f "$cache_file" ]; then local cache_mtime cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0") if [ "$cache_mtime" -gt "$file_mtime" ]; then local cached_checksum cached_checksum=$(cat "$cache_file" 2>/dev/null) if [[ -n "$cached_checksum" && "$cached_checksum" =~ ^[a-f0-9]{32}$ ]]; then echo "$cached_checksum" return 0 fi fi fi # Calculate new checksum local checksum checksum=$(md5sum "$file" 2>/dev/null | cut -d' ' -f1) # Check if we got a valid checksum (not empty and looks like md5) if [[ -n "$checksum" && "$checksum" =~ ^[a-f0-9]{32}$ ]]; then # Cache the checksum echo "$checksum" > "$cache_file" 2>/dev/null || true echo "$checksum" return 0 fi # If normal access failed or returned empty, try with sudo checksum=$(sudo md5sum "$file" 2>/dev/null | cut -d' ' -f1) # Check if sudo checksum is valid if [[ -n "$checksum" && "$checksum" =~ ^[a-f0-9]{32}$ ]]; then # Cache the checksum with appropriate permissions echo "$checksum" | sudo tee "$cache_file" >/dev/null 2>&1 || true echo "$checksum" return 0 fi # If both fail, return error indicator echo "PERMISSION_DENIED" return 1 } # Check database integrity using Plex SQLite check_database_integrity() { local db_file="$1" local db_name db_name=$(basename "$db_file") log_message "Checking database integrity: $db_name" # Check if Plex SQLite exists if [ ! -f "$PLEX_SQLITE" ]; then log_error "Plex SQLite binary not found at: $PLEX_SQLITE" return 1 fi # Make Plex SQLite executable if it isn't already sudo chmod +x "$PLEX_SQLITE" 2>/dev/null || true # Run integrity check local integrity_result integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1) local check_exit_code=$? if [ $check_exit_code -ne 0 ]; then log_error "Failed to run integrity check on $db_name: $integrity_result" return 1 fi if echo "$integrity_result" | grep -q "^ok$"; then log_success "Database integrity check passed: $db_name" return 0 else log_warning "Database integrity issues detected in $db_name:" echo "$integrity_result" | while read -r line; do log_warning " $line" done return 1 fi } # Advanced database repair using project methods repair_database() { local db_file="$1" local db_name db_name=$(basename "$db_file") local backup_file="${db_file}.pre-repair-backup" local timestamp timestamp=$(date "+%Y-%m-%d_%H.%M.%S") local db_dir db_dir=$(dirname "$db_file") local temp_dir="${db_dir}/repair-temp-${timestamp}" log_message "Starting advanced database repair for: $db_name" # Create temporary repair directory sudo mkdir -p "$temp_dir" # Create backup before repair if sudo cp "$db_file" "$backup_file"; then log_success "Created pre-repair backup: $(basename "$backup_file")" else log_error "Failed to create pre-repair backup" sudo rm -rf "$temp_dir" 2>/dev/null || true return 1 fi # Step 1: Database cleanup (DBRepair method) log_message "Step 1: Database cleanup and optimization..." local vacuum_result vacuum_result=$(sudo "$PLEX_SQLITE" "$db_file" "VACUUM;" 2>&1) local vacuum_exit_code=$? if [ $vacuum_exit_code -ne 0 ]; then log_warning "VACUUM failed: $vacuum_result" log_message "Attempting dump/restore method..." # Step 2: Dump and restore (fallback method) local dump_file="${temp_dir}/${db_name}.sql" local new_db_file="${temp_dir}/${db_name}.new" log_message "Step 2: Dumping database to SQL..." if sudo "$PLEX_SQLITE" "$db_file" ".dump" | sudo tee "$dump_file" >/dev/null 2>&1; then log_success "Database dumped successfully" log_message "Step 3: Creating new database from dump..." if sudo "$PLEX_SQLITE" "$new_db_file" ".read $dump_file" 2>/dev/null; then log_success "New database created successfully" # Replace original with repaired version if sudo mv "$new_db_file" "$db_file"; then log_success "Database replaced with repaired version" # Set proper ownership sudo chown plex:plex "$db_file" sudo chmod 644 "$db_file" # Cleanup sudo rm -rf "$temp_dir" return 0 else log_error "Failed to replace original database" fi else log_error "Failed to create new database from dump" fi else log_error "Failed to dump database" fi else log_success "Database VACUUM completed successfully" # Run reindex for good measure log_message "Running REINDEX..." local reindex_result reindex_result=$(sudo "$PLEX_SQLITE" "$db_file" "REINDEX;" 2>&1) local reindex_exit_code=$? if [ $reindex_exit_code -eq 0 ]; then log_success "Database REINDEX completed successfully" sudo rm -rf "$temp_dir" return 0 else log_warning "REINDEX failed: $reindex_result" fi fi # If we get here, repair failed log_error "Database repair failed. Restoring original..." if sudo mv "$backup_file" "$db_file"; then log_success "Original database restored" else log_error "Failed to restore original database!" fi sudo rm -rf "$temp_dir" return 1 } # WAL (Write-Ahead Logging) file handling handle_wal_files() { local action="$1" # "backup" or "restore" local backup_path="$2" log_info "Handling WAL files: $action" # Define WAL files that might exist local wal_files=( "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db-wal" "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db-shm" "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db-wal" "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db-shm" ) for wal_file in "${wal_files[@]}"; do local wal_basename wal_basename=$(basename "$wal_file") case "$action" in "backup") if [ -f "$wal_file" ]; then log_info "Found WAL/SHM file: $wal_basename" local backup_file="${backup_path}/${wal_basename}" if sudo cp "$wal_file" "$backup_file"; then log_success "Backed up WAL/SHM file: $wal_basename" # Verify backup if verify_backup "$wal_file" "$backup_file"; then log_success "Verified WAL/SHM backup: $wal_basename" else log_warning "WAL/SHM backup verification failed: $wal_basename" fi else log_warning "Failed to backup WAL/SHM file: $wal_basename" fi else log_info "WAL/SHM file not found (normal): $wal_basename" fi ;; "checkpoint") # Force WAL checkpoint to integrate changes into main database local db_file="${wal_file%.db-*}.db" if [ -f "$db_file" ] && [ -f "$wal_file" ]; then log_info "Performing WAL checkpoint for: $(basename "$db_file")" if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA wal_checkpoint(FULL);" 2>/dev/null; then log_success "WAL checkpoint completed for: $(basename "$db_file")" else log_warning "WAL checkpoint failed for: $(basename "$db_file")" fi fi ;; esac done } # Enhanced database integrity check with WAL handling check_database_integrity_with_wal() { local db_file="$1" local db_name db_name=$(basename "$db_file") log_message "Checking database integrity with WAL handling: $db_name" # Check if Plex SQLite exists if [ ! -f "$PLEX_SQLITE" ]; then log_error "Plex SQLite binary not found at: $PLEX_SQLITE" return 1 fi # Make Plex SQLite executable if it isn't already sudo chmod +x "$PLEX_SQLITE" 2>/dev/null || true # Check if WAL file exists and handle it local wal_file="${db_file}-wal" if [ -f "$wal_file" ]; then log_info "WAL file found for $db_name, performing checkpoint..." if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA wal_checkpoint(FULL);" 2>/dev/null; then log_success "WAL checkpoint completed for $db_name" else log_warning "WAL checkpoint failed for $db_name, proceeding with integrity check" fi fi # Run integrity check local integrity_result integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1) local check_exit_code=$? if [ $check_exit_code -ne 0 ]; then log_error "Failed to run integrity check on $db_name: $integrity_result" return 1 fi if echo "$integrity_result" | grep -q "^ok$"; then log_success "Database integrity check passed: $db_name" return 0 else log_warning "Database integrity issues detected in $db_name:" echo "$integrity_result" | while read -r line; do log_warning " $line" done return 1 fi } # Parallel verification function verify_files_parallel() { local backup_dir="$1" local -a pids=() local temp_dir temp_dir=$(mktemp -d) local verification_errors=0 if [ "$PARALLEL_VERIFICATION" != true ]; then # Fall back to sequential verification for nickname in "${!PLEX_FILES[@]}"; do local src_file="${PLEX_FILES[$nickname]}" local dest_file dest_file="$backup_dir/$(basename "$src_file")" if [ -f "$dest_file" ]; then if ! verify_backup "$src_file" "$dest_file"; then verification_errors=$((verification_errors + 1)) fi fi done return $verification_errors fi log_info "Starting parallel verification in $backup_dir" # Start verification jobs in parallel for nickname in "${!PLEX_FILES[@]}"; do local src_file="${PLEX_FILES[$nickname]}" local dest_file dest_file="$backup_dir/$(basename "$src_file")" if [ -f "$dest_file" ]; then ( local result_file="$temp_dir/$nickname.result" if verify_backup "$src_file" "$dest_file"; then echo "0" > "$result_file" else echo "1" > "$result_file" fi ) & pids+=($!) fi done # Wait for all verification jobs to complete for pid in "${pids[@]}"; do wait "$pid" done # Collect results for nickname in "${!PLEX_FILES[@]}"; do local result_file="$temp_dir/$nickname.result" if [ -f "$result_file" ]; then local result result=$(cat "$result_file") if [ "$result" != "0" ]; then verification_errors=$((verification_errors + 1)) fi fi done # Cleanup rm -rf "$temp_dir" return $verification_errors } # Verify backup integrity with robust checksum handling verify_backup() { local src="$1" local dest="$2" log_message "Verifying backup integrity: $(basename "$src")" # Calculate destination checksum first (this doesn't change) local dest_checksum dest_checksum=$(sudo md5sum "$dest" 2>/dev/null | cut -d' ' -f1) local dest_result=$? if [ $dest_result -ne 0 ] || [[ ! "$dest_checksum" =~ ^[a-f0-9]{32}$ ]]; then log_error "Failed to calculate destination checksum for $(basename "$dest")" return 1 fi # Calculate source checksum (without caching to get current state) local src_checksum src_checksum=$(sudo md5sum "$src" 2>/dev/null | cut -d' ' -f1) local src_result=$? if [ $src_result -ne 0 ] || [[ ! "$src_checksum" =~ ^[a-f0-9]{32}$ ]]; then log_error "Failed to calculate source checksum for $(basename "$src")" return 1 fi if [ "$src_checksum" == "$dest_checksum" ]; then log_success "Backup verification passed: $(basename "$src")" log_info "Source checksum: $src_checksum" log_info "Backup checksum: $dest_checksum" return 0 else # If checksums don't match, wait a moment and try again (in case of delayed writes) log_warning "Initial checksum mismatch for $(basename "$src"), retrying in 2 seconds..." sleep 2 # Recalculate source checksum src_checksum=$(sudo md5sum "$src" 2>/dev/null | cut -d' ' -f1) src_result=$? if [ $src_result -ne 0 ] || [[ ! "$src_checksum" =~ ^[a-f0-9]{32}$ ]]; then log_error "Failed to recalculate source checksum for $(basename "$src")" return 1 fi if [ "$src_checksum" == "$dest_checksum" ]; then log_success "Backup verification passed on retry: $(basename "$src")" log_info "Source checksum: $src_checksum" log_info "Backup checksum: $dest_checksum" return 0 else log_error "Backup verification failed: $(basename "$src")" log_error "Source checksum: $src_checksum" log_error "Backup checksum: $dest_checksum" # For database files, this might be normal if Plex processes modified the file # Let's do a final check - if the backup file is a valid database, we might accept it if [[ "$(basename "$src")" == *.db ]]; then log_warning "Database file checksum mismatch might be due to post-backup modifications" log_warning "Checking if backup database is valid..." # Basic SQLite validation if sudo "$PLEX_SQLITE" "$dest" "PRAGMA integrity_check;" 2>/dev/null | grep -q "^ok$"; then log_warning "Backup database integrity is valid despite checksum mismatch" log_warning "Accepting backup (source file may have been modified after copy)" return 0 else log_error "Backup database integrity check failed" return 1 fi fi return 1 fi fi } # Enhanced service management with better monitoring manage_plex_service() { local action="$1" local operation_start operation_start=$(date +%s) log_message "Managing Plex service: $action" case "$action" in stop) if sudo systemctl stop plexmediaserver.service; then log_success "Plex service stopped" # Wait for clean shutdown with progress indicator local wait_time=0 local max_wait=15 while [ $wait_time -lt $max_wait ]; do if ! sudo systemctl is-active --quiet plexmediaserver.service; then log_success "Plex service confirmed stopped (${wait_time}s)" track_performance "service_stop" "$operation_start" return 0 fi sleep 1 wait_time=$((wait_time + 1)) echo -n "." done echo log_warning "Plex service may not have stopped cleanly after ${max_wait}s" return 1 else log_error "Failed to stop Plex service" return 1 fi ;; start) if sudo systemctl start plexmediaserver.service; then log_success "Plex service start command issued" # Wait for service to be fully running with progress indicator local wait_time=0 local max_wait=30 while [ $wait_time -lt $max_wait ]; do if sudo systemctl is-active --quiet plexmediaserver.service; then log_success "Plex service confirmed running (${wait_time}s)" track_performance "service_start" "$operation_start" return 0 fi sleep 1 wait_time=$((wait_time + 1)) echo -n "." done echo log_error "Plex service failed to start within ${max_wait}s" return 1 else log_error "Failed to start Plex service" return 1 fi ;; *) log_error "Invalid service action: $action" return 1 ;; esac } # Check available disk space check_disk_space() { local backup_dir="$1" local required_space_mb="$2" local available_space_kb available_space_kb=$(df "$backup_dir" | awk 'NR==2 {print $4}') local available_space_mb=$((available_space_kb / 1024)) if [ "$available_space_mb" -lt "$required_space_mb" ]; then log_error "Insufficient disk space. Required: ${required_space_mb}MB, Available: ${available_space_mb}MB" return 1 fi log_message "Disk space check passed. Available: ${available_space_mb}MB" return 0 } # Estimate backup size estimate_backup_size() { local total_size=0 for nickname in "${!PLEX_FILES[@]}"; do local file="${PLEX_FILES[$nickname]}" if [ -f "$file" ]; then local size_kb size_kb=$(du -k "$file" 2>/dev/null | cut -f1) total_size=$((total_size + size_kb)) fi done echo $((total_size / 1024)) # Return size in MB } # Generate performance report generate_performance_report() { if [ "$PERFORMANCE_MONITORING" != true ] || [ ! -f "$PERFORMANCE_LOG_FILE" ]; then return 0 fi log_info "Performance Summary:" # Recent performance data (last 10 entries) jq -r '.[-10:] | .[] | " \(.operation): \(.duration_seconds)s (\(.timestamp))"' "$PERFORMANCE_LOG_FILE" 2>/dev/null || true # Calculate averages for common operations local avg_backup avg_backup=$(jq '[.[] | select(.operation == "backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0") local avg_verification avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0") local avg_service_stop avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0") local avg_service_start avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0") if [ "$avg_backup" != "0" ]; then log_info "Average backup time: ${avg_backup}s" fi if [ "$avg_verification" != "0" ]; then log_info "Average verification time: ${avg_verification}s" fi if [ "$avg_service_stop" != "0" ]; then log_info "Average service stop time: ${avg_service_stop}s" fi if [ "$avg_service_start" != "0" ]; then log_info "Average service start time: ${avg_service_start}s" fi } # Clean old backups cleanup_old_backups() { log_message "Cleaning up old backups..." # Remove backups older than MAX_BACKUP_AGE_DAYS find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true # Keep only MAX_BACKUPS_TO_KEEP most recent backups local backup_count backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l) if [ "$backup_count" -gt "$MAX_BACKUPS_TO_KEEP" ]; then local excess_count=$((backup_count - MAX_BACKUPS_TO_KEEP)) log_message "Removing $excess_count old backup(s)..." find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -printf '%T@ %p\n' | \ sort -n | head -n "$excess_count" | cut -d' ' -f2- | \ xargs -r rm -f fi # Clean up any remaining dated directories from old backup structure find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -exec rm -rf {} \; 2>/dev/null || true log_message "Backup cleanup completed" } # Database integrity check only check_integrity_only() { log_message "Starting database integrity check at $(date)" # Stop Plex service manage_plex_service stop # Handle WAL files first handle_wal_files "checkpoint" local db_integrity_issues=0 local databases_checked=0 for nickname in "${!PLEX_FILES[@]}"; do local file="${PLEX_FILES[$nickname]}" # Only check database files if [[ "$file" == *".db" ]] && [ -f "$file" ]; then databases_checked=$((databases_checked + 1)) log_message "Checking integrity of $(basename "$file")..." if ! check_database_integrity_with_wal "$file"; then db_integrity_issues=$((db_integrity_issues + 1)) log_warning "Database integrity issues found in $(basename "$file")" # Determine if we should attempt repair local should_repair=false if [ "$AUTO_REPAIR" = true ]; then should_repair=true log_message "Auto-repair enabled, attempting repair..." elif [ "$INTERACTIVE_MODE" = true ]; then read -p "Attempt to repair $(basename "$file")? [y/N]: " -n 1 -r -t 30 local read_result=$? echo if [ $read_result -eq 0 ] && [[ $REPLY =~ ^[Yy]$ ]]; then should_repair=true elif [ $read_result -ne 0 ]; then log_warning "Read timeout or error, defaulting to no repair" fi else log_warning "Non-interactive mode: skipping repair for $(basename "$file")" fi if [ "$should_repair" = true ]; then if repair_database "$file"; then log_success "Database repair successful for $(basename "$file")" # Re-check integrity after repair if check_database_integrity "$file"; then log_success "Post-repair integrity check passed for $(basename "$file")" else log_warning "Post-repair integrity check still shows issues for $(basename "$file")" fi else log_error "Database repair failed for $(basename "$file")" fi fi else log_success "Database integrity check passed for $(basename "$file")" fi fi done # Start Plex service manage_plex_service start # Summary log_message "Integrity check completed at $(date)" log_message "Databases checked: $databases_checked" log_message "Databases with issues: $db_integrity_issues" if [ "$db_integrity_issues" -gt 0 ]; then log_warning "Integrity check completed with issues found" exit 1 else log_success "All database integrity checks passed" exit 0 fi } # Main backup function main() { local overall_start overall_start=$(date +%s) log_message "Starting enhanced Plex backup process at $(date)" send_notification "Backup Started" "Plex backup process initiated" "info" # Create necessary directories mkdir -p "${BACKUP_ROOT}" mkdir -p "${LOCAL_LOG_ROOT}" # Initialize logs initialize_logs # Check if only doing integrity check if [ "$INTEGRITY_CHECK_ONLY" = true ]; then check_integrity_only # shellcheck disable=SC2317 return $? fi # Estimate backup size local estimated_size_mb estimated_size_mb=$(estimate_backup_size) log_message "Estimated backup size: ${estimated_size_mb}MB" # Check disk space (require 2x estimated size for safety) local required_space_mb=$((estimated_size_mb * 2)) if ! check_disk_space "${BACKUP_ROOT}" "$required_space_mb"; then log_error "Aborting backup due to insufficient disk space" exit 1 fi # Stop Plex service manage_plex_service stop local backup_errors=0 local files_backed_up=0 local backed_up_files=() # Array to track successfully backed up files local BACKUP_PATH="${BACKUP_ROOT}" # Ensure backup root directory exists mkdir -p "$BACKUP_PATH" # Handle WAL files and check database integrity before backup log_message "Performing WAL checkpoint and checking database integrity before backup..." handle_wal_files "checkpoint" local db_integrity_issues=0 for nickname in "${!PLEX_FILES[@]}"; do local file="${PLEX_FILES[$nickname]}" # Only check database files if [[ "$file" == *".db" ]] && [ -f "$file" ]; then if ! check_database_integrity_with_wal "$file"; then db_integrity_issues=$((db_integrity_issues + 1)) log_warning "Database integrity issues found in $(basename "$file")" # Always attempt repair when corruption is detected (default behavior) local should_repair=true local repair_attempted=false # Override repair behavior only if explicitly disabled if [ "$AUTO_REPAIR" = false ]; then should_repair=false log_warning "Auto-repair explicitly disabled, skipping repair" elif [ "$INTERACTIVE_MODE" = true ]; then read -p "Database $(basename "$file") has integrity issues. Attempt repair before backup? [Y/n]: " -n 1 -r -t 30 local read_result=$? echo if [ $read_result -eq 0 ] && [[ $REPLY =~ ^[Nn]$ ]]; then should_repair=false log_message "User declined repair for $(basename "$file")" elif [ $read_result -ne 0 ]; then log_message "Read timeout, proceeding with default repair" fi else log_message "Auto-repair enabled by default, attempting repair..." fi if [ "$should_repair" = true ]; then repair_attempted=true log_message "Attempting to repair corrupted database: $(basename "$file")" if repair_database "$file"; then log_success "Database repair successful for $(basename "$file")" # Re-verify integrity after repair if check_database_integrity_with_wal "$file"; then log_success "Post-repair integrity verification passed for $(basename "$file")" # Decrement issue count since repair was successful db_integrity_issues=$((db_integrity_issues - 1)) else log_warning "Post-repair integrity check still shows issues for $(basename "$file")" log_warning "Will backup with known integrity issues" fi else log_error "Database repair failed for $(basename "$file")" log_warning "Will backup corrupted database - manual intervention may be needed" backup_errors=$((backup_errors + 1)) fi else log_warning "Skipping repair - will backup database with known integrity issues" fi # Log repair attempt for monitoring purposes if [ "$repair_attempted" = true ]; then send_notification "Database Repair" "Attempted repair of $(basename "$file")" "warning" fi fi fi done # Handle WAL files backup handle_wal_files "backup" "$BACKUP_PATH" # Backup files - always perform full backup local backup_start backup_start=$(date +%s) for nickname in "${!PLEX_FILES[@]}"; do local file="${PLEX_FILES[$nickname]}" if [ -f "$file" ]; then log_message "Backing up: $(basename "$file")" # Create backup filename without timestamp (use original filename) local backup_file backup_file="${BACKUP_PATH}/$(basename "$file")" # Copy file if sudo cp "$file" "$backup_file"; then log_success "Copied: $(basename "$file")" # Verify backup if verify_backup "$file" "$backup_file"; then log_success "Verified: $(basename "$file")" files_backed_up=$((files_backed_up + 1)) # Add friendly filename to backed up files list case "$(basename "$file")" in "com.plexapp.plugins.library.db") backed_up_files+=("library.db") ;; "com.plexapp.plugins.library.blobs.db") backed_up_files+=("blobs.db") ;; "Preferences.xml") backed_up_files+=("Preferences.xml") ;; *) backed_up_files+=("$(basename "$file")") ;; esac else log_error "Verification failed: $(basename "$file")" backup_errors=$((backup_errors + 1)) # Remove failed backup rm -f "$backup_file" fi else log_error "Failed to copy: $(basename "$file")" backup_errors=$((backup_errors + 1)) fi else log_warning "File not found: $file" fi done # Start Plex service manage_plex_service start # Create archive if files were backed up if [ "$files_backed_up" -gt 0 ]; then log_message "Creating compressed archive..." # Check backup root directory is writable if [ ! -w "$BACKUP_ROOT" ]; then log_error "Backup root directory is not writable: $BACKUP_ROOT" backup_errors=$((backup_errors + 1)) else local temp_archive temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz" local final_archive final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz" log_info "Temporary archive: $temp_archive" log_info "Final archive: $final_archive" # Create archive in /tmp first, containing only the backed up files local temp_dir temp_dir="/tmp/plex-backup-staging-$(date '+%Y%m%d_%H%M%S')" if ! mkdir -p "$temp_dir"; then log_error "Failed to create staging directory: $temp_dir" backup_errors=$((backup_errors + 1)) else log_info "Created staging directory: $temp_dir" # Copy backed up files to staging directory local files_staged=0 for nickname in "${!PLEX_FILES[@]}"; do local file="${PLEX_FILES[$nickname]}" local backup_file backup_file="${BACKUP_PATH}/$(basename "$file")" if [ -f "$backup_file" ]; then if cp "$backup_file" "$temp_dir/"; then files_staged=$((files_staged + 1)) log_info "Staged for archive: $(basename "$backup_file")" else log_warning "Failed to stage file: $(basename "$backup_file")" fi else log_warning "Backup file not found for staging: $(basename "$backup_file")" fi done # Check if any files were staged if [ "$files_staged" -eq 0 ]; then log_error "No files were staged for archive creation" rm -rf "$temp_dir" backup_errors=$((backup_errors + 1)) else log_info "Staged $files_staged files for archive creation" # Check disk space in /tmp local temp_available_kb temp_available_kb=$(df /tmp | awk 'NR==2 {print $4}') local temp_available_mb=$((temp_available_kb / 1024)) local staging_size_mb staging_size_mb=$(du -sm "$temp_dir" | cut -f1) log_info "/tmp available space: ${temp_available_mb}MB, staging directory size: ${staging_size_mb}MB" # Check if we have enough space (require 3x staging size for compression) local required_space_mb=$((staging_size_mb * 3)) if [ "$temp_available_mb" -lt "$required_space_mb" ]; then log_error "Insufficient space in /tmp for archive creation. Required: ${required_space_mb}MB, Available: ${temp_available_mb}MB" rm -rf "$temp_dir" backup_errors=$((backup_errors + 1)) else # Create archive with detailed error logging log_info "Creating archive: $(basename "$temp_archive")" local tar_output tar_output=$(tar -czf "$temp_archive" -C "$temp_dir" . 2>&1) local tar_exit_code=$? if [ $tar_exit_code -eq 0 ]; then # Verify archive was actually created and has reasonable size if [ -f "$temp_archive" ]; then local archive_size_mb archive_size_mb=$(du -sm "$temp_archive" | cut -f1) log_success "Archive created successfully: $(basename "$temp_archive") (${archive_size_mb}MB)" # Test archive integrity before moving if tar -tzf "$temp_archive" >/dev/null 2>&1; then log_success "Archive integrity verified" # Move the completed archive to the backup root if mv "$temp_archive" "$final_archive"; then log_success "Archive moved to final location: $(basename "$final_archive")" # Remove individual backup files and staging directory rm -rf "$temp_dir" for nickname in "${!PLEX_FILES[@]}"; do local file="${PLEX_FILES[$nickname]}" local backup_file backup_file="${BACKUP_PATH}/$(basename "$file")" rm -f "$backup_file" "$backup_file.md5" done else log_error "Failed to move archive to final location: $final_archive" log_error "Temporary archive remains at: $temp_archive" rm -rf "$temp_dir" backup_errors=$((backup_errors + 1)) fi else log_error "Archive integrity check failed - archive may be corrupted" log_error "Archive size: ${archive_size_mb}MB" rm -f "$temp_archive" rm -rf "$temp_dir" backup_errors=$((backup_errors + 1)) fi else log_error "Archive file was not created despite tar success" rm -rf "$temp_dir" backup_errors=$((backup_errors + 1)) fi else log_error "Failed to create archive (tar exit code: $tar_exit_code)" if [ -n "$tar_output" ]; then log_error "Tar command output: $tar_output" fi # Additional diagnostic information log_error "Staging directory contents:" find "$temp_dir" -ls 2>&1 | while IFS= read -r line; do log_error " $line" done local temp_usage temp_usage=$(df -h /tmp | awk 'NR==2 {print "Used: " $3 "/" $2 " (" $5 ")"}') log_error "Temp filesystem status: $temp_usage" rm -rf "$temp_dir" backup_errors=$((backup_errors + 1)) fi fi fi fi fi # Send notification local files_list files_list=$(format_backed_up_files "${backed_up_files[@]}") send_notification "Backup Completed" "Successfully backed up $files_list" "success" else log_message "No files needed backup" fi # Cleanup old backups cleanup_old_backups # Track overall backup performance if [ "$files_backed_up" -gt 0 ]; then track_performance "full_backup" "$backup_start" fi track_performance "total_script" "$overall_start" # Generate performance report generate_performance_report # Final summary local total_time=$(($(date +%s) - overall_start)) log_message "Backup process completed at $(date)" log_message "Total execution time: ${total_time}s" log_message "Files backed up: $files_backed_up" log_message "Errors encountered: $backup_errors" # Sync logs to shared location and cleanup old local logs log_info "Post-backup: synchronizing logs and cleaning up old files" sync_logs_to_shared cleanup_old_local_logs if [ "$backup_errors" -gt 0 ]; then log_error "Backup completed with errors" send_notification "Backup Error" "Backup completed with $backup_errors errors" "error" exit 1 else log_success "Enhanced backup completed successfully" local files_list files_list=$(format_backed_up_files "${backed_up_files[@]}") send_notification "Backup Success" "$files_list backed up successfully in ${total_time}s" "success" fi } # Trap to ensure Plex is restarted on script exit trap 'manage_plex_service start' EXIT # Run main function main "$@"