Refactor variable assignments and improve script readability in validate-plex-backups.sh and validate-plex-recovery.sh

- Split inline local declarations from their command-substitution assignments (ShellCheck SC2155) so a failing substitution is no longer masked; see the sketch after this list.
- Updated condition checks and log messages for better readability and consistency.
- Added a backup of validate-plex-recovery.sh for safety.
- Introduced run-docker-tests.sh, a new script for running the test suite inside Docker containers.
- Improved condition checks and logging in ssh-login.sh.
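
The declaration split in the first bullet follows ShellCheck SC2155: when local and a command substitution share one line, the exit status of local (always 0) masks any failure inside the substitution. A minimal illustrative sketch, not lifted from the changed scripts:

# Before: $? after the declaration reflects local, not the failed command.
broken() {
    local ts=$(date --bad-flag 2>/dev/null)
    echo "status: $?"    # prints 0 despite the failure
}
# After: a bare assignment propagates the substitution's exit status.
fixed() {
    local ts
    ts=$(date --bad-flag 2>/dev/null)
    echo "status: $?"    # prints a non-zero status
}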
Peter Wood
2025-06-05 17:14:02 -04:00
parent c3f237a321
commit 58b5dea8b4
31 changed files with 5024 additions and 539 deletions

View File

@@ -57,12 +57,7 @@ BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Performance tracking variables
SCRIPT_START_TIME=$(date +%s)
BACKUP_START_TIME=""
VERIFICATION_START_TIME=""
SERVICE_STOP_TIME=""
SERVICE_START_TIME=""
# Performance tracking variables (removed unused variables)
# Configuration
MAX_BACKUP_AGE_DAYS=30
@@ -168,7 +163,8 @@ declare -A PLEX_FILES=(
# Logging functions
log_message() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${CYAN}[${timestamp}]${NC} ${message}"
mkdir -p "$LOCAL_LOG_ROOT"
echo "[${timestamp}] $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
@@ -176,7 +172,8 @@ log_message() {
log_error() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}"
mkdir -p "$LOCAL_LOG_ROOT"
echo "[${timestamp}] ERROR: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
@@ -184,7 +181,8 @@ log_error() {
log_success() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}"
mkdir -p "$LOCAL_LOG_ROOT"
echo "[${timestamp}] SUCCESS: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
@@ -192,7 +190,8 @@ log_success() {
log_warning() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}"
mkdir -p "$LOCAL_LOG_ROOT"
echo "[${timestamp}] WARNING: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
@@ -200,7 +199,8 @@ log_warning() {
log_info() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${BLUE}[${timestamp}] INFO:${NC} ${message}"
mkdir -p "$LOCAL_LOG_ROOT"
echo "[${timestamp}] INFO: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
@@ -224,7 +224,8 @@ track_performance() {
fi
# Add performance entry
local entry=$(jq -n \
local entry
entry=$(jq -n \
--arg operation "$operation" \
--arg duration "$duration" \
--arg timestamp "$(date -Iseconds)" \
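
Note on the jq pattern above: the entry is built with jq -n and appended via a temp file so a failed jq invocation cannot truncate the log. A minimal sketch of the same pattern; the perf.json path and values are hypothetical:

perf_log="/tmp/perf.json"
[ -f "$perf_log" ] || echo "[]" > "$perf_log"
entry=$(jq -n \
    --arg operation "backup" \
    --arg duration "42" \
    --arg timestamp "$(date -Iseconds)" \
    '{operation: $operation, duration_seconds: ($duration | tonumber), timestamp: $timestamp}')
jq --argjson entry "$entry" '. += [$entry]' "$perf_log" > "${perf_log}.tmp" \
    && mv "${perf_log}.tmp" "$perf_log"    # write-then-rename keeps the log intact on failure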
@@ -251,7 +252,8 @@ initialize_logs() {
# Log synchronization functions
sync_logs_to_shared() {
local sync_start_time=$(date +%s)
local sync_start_time
sync_start_time=$(date +%s)
log_info "Starting log synchronization to shared location"
# Ensure shared log directory exists
@@ -272,7 +274,8 @@ sync_logs_to_shared() {
for log_file in "$LOCAL_LOG_ROOT"/*.log "$LOCAL_LOG_ROOT"/*.json; do
if [ -f "$log_file" ]; then
local filename=$(basename "$log_file")
local filename
filename=$(basename "$log_file")
local shared_file="$SHARED_LOG_ROOT/$filename"
# Only copy if file doesn't exist in shared location or local is newer
@@ -288,7 +291,8 @@ sync_logs_to_shared() {
fi
done
local sync_end_time=$(date +%s)
local sync_end_time
sync_end_time=$(date +%s)
local sync_duration=$((sync_end_time - sync_start_time))
if [ $error_count -eq 0 ]; then
@@ -302,7 +306,8 @@ sync_logs_to_shared() {
# Cleanup old local logs (30 day retention)
cleanup_old_local_logs() {
local cleanup_start_time=$(date +%s)
local cleanup_start_time
cleanup_start_time=$(date +%s)
log_info "Starting cleanup of old local logs (30+ days)"
if [ ! -d "$LOCAL_LOG_ROOT" ]; then
@@ -315,7 +320,8 @@ cleanup_old_local_logs() {
# Find and remove log files older than 30 days
while IFS= read -r -d '' old_file; do
local filename=$(basename "$old_file")
local filename
filename=$(basename "$old_file")
if rm "$old_file" 2>/dev/null; then
((cleanup_count++))
log_info "Removed old log: $filename"
@@ -327,12 +333,15 @@ cleanup_old_local_logs() {
# Also clean up old performance log entries (keep structure, remove old entries)
if [ -f "$PERFORMANCE_LOG_FILE" ]; then
local thirty_days_ago=$(date -d '30 days ago' -Iseconds)
local thirty_days_ago
thirty_days_ago=$(date -d '30 days ago' -Iseconds)
local temp_perf_file="${PERFORMANCE_LOG_FILE}.cleanup.tmp"
if jq --arg cutoff "$thirty_days_ago" '[.[] | select(.timestamp >= $cutoff)]' "$PERFORMANCE_LOG_FILE" > "$temp_perf_file" 2>/dev/null; then
local old_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local new_count=$(jq length "$temp_perf_file" 2>/dev/null || echo "0")
local old_count
old_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local new_count
new_count=$(jq length "$temp_perf_file" 2>/dev/null || echo "0")
local removed_count=$((old_count - new_count))
if [ "$removed_count" -gt 0 ]; then
@@ -349,7 +358,8 @@ cleanup_old_local_logs() {
fi
fi
local cleanup_end_time=$(date +%s)
local cleanup_end_time
cleanup_end_time=$(date +%s)
local cleanup_duration=$((cleanup_end_time - cleanup_start_time))
if [ $cleanup_count -gt 0 ]; then
@@ -366,7 +376,8 @@ send_notification() {
local title="$1"
local message="$2"
local status="${3:-info}" # success, error, warning, info
local hostname=$(hostname)
local hostname
hostname=$(hostname)
# Console notification
case "$status" in
@@ -412,16 +423,17 @@ format_backed_up_files() {
local files=("$@")
local count=${#files[@]}
if [ $count -eq 0 ]; then
if [ "$count" -eq 0 ]; then
echo "no files"
elif [ $count -eq 1 ]; then
elif [ "$count" -eq 1 ]; then
echo "${files[0]}"
elif [ $count -eq 2 ]; then
elif [ "$count" -eq 2 ]; then
echo "${files[0]} and ${files[1]}"
else
local last_file="${files[-1]}"
local other_files=("${files[@]:0:$((count-1))}")
local other_files_str=$(IFS=', '; echo "${other_files[*]}")
local other_files_str
other_files_str=$(IFS=', '; echo "${other_files[*]}")
echo "${other_files_str}, and ${last_file}"
fi
}
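
A note on the join idiom above: setting IFS inside $( ... ) scopes the change to that subshell, and "${arr[*]}" joins elements with the first character of IFS only, a detail that is easy to miss. Illustrative sketch:

files=(alpha.db beta.db gamma.xml)
joined=$(IFS=', '; echo "${files[*]}")
echo "$joined"    # -> alpha.db,beta.db,gamma.xml (joined by ',' alone; the space in IFS is ignored)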
@@ -431,17 +443,20 @@ calculate_checksum() {
local file="$1"
# Use /tmp for cache files to avoid permission issues
local cache_dir="/tmp/plex-backup-cache"
local cache_file="$cache_dir/$(echo "$file" | sed 's|/|_|g').md5"
local file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
local cache_file="$cache_dir/${file//\//_}.md5"
local file_mtime
file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
# Create cache directory if it doesn't exist
mkdir -p "$cache_dir" 2>/dev/null || true
# Check if cached checksum exists and is newer than file
if [ -f "$cache_file" ]; then
local cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
local cache_mtime
cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
if [ "$cache_mtime" -gt "$file_mtime" ]; then
local cached_checksum=$(cat "$cache_file" 2>/dev/null)
local cached_checksum
cached_checksum=$(cat "$cache_file" 2>/dev/null)
if [[ -n "$cached_checksum" && "$cached_checksum" =~ ^[a-f0-9]{32}$ ]]; then
echo "$cached_checksum"
return 0
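
The cache test above trusts a stored checksum only when the cache file is strictly newer than the data file and its content looks like an MD5 digest. A condensed sketch with hypothetical paths, assuming GNU stat (the scripts themselves carry a BSD stat fallback elsewhere):

file="/tmp/data.bin"; cache="/tmp/data.bin.md5"
file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo 0)
cache_mtime=$(stat -c %Y "$cache" 2>/dev/null || echo 0)
if [ "$cache_mtime" -gt "$file_mtime" ] && [[ "$(cat "$cache" 2>/dev/null)" =~ ^[a-f0-9]{32}$ ]]; then
    checksum=$(cat "$cache")                      # cache hit
else
    checksum=$(md5sum "$file" | cut -d' ' -f1)    # recompute and refresh the cache
    echo "$checksum" > "$cache"
fi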
@@ -480,7 +495,8 @@ calculate_checksum() {
# Check database integrity using Plex SQLite
check_database_integrity() {
local db_file="$1"
local db_name=$(basename "$db_file")
local db_name
db_name=$(basename "$db_file")
log_message "Checking database integrity: $db_name"
@@ -518,10 +534,13 @@ check_database_integrity() {
# Advanced database repair using https://github.com/ChuckPa/DBRepair/ project methods
repair_database() {
local db_file="$1"
local db_name=$(basename "$db_file")
local db_name
db_name=$(basename "$db_file")
local backup_file="${db_file}.pre-repair-backup"
local timestamp=$(date "+%Y-%m-%d_%H.%M.%S")
local db_dir=$(dirname "$db_file")
local timestamp
timestamp=$(date "+%Y-%m-%d_%H.%M.%S")
local db_dir
db_dir=$(dirname "$db_file")
local temp_dir="${db_dir}/repair-temp-${timestamp}"
log_message "Starting advanced database repair for: $db_name"
@@ -554,7 +573,7 @@ repair_database() {
local new_db_file="${temp_dir}/${db_name}.new"
log_message "Step 2: Dumping database to SQL..."
if sudo "$PLEX_SQLITE" "$db_file" ".dump" > "$dump_file" 2>/dev/null; then
if sudo "$PLEX_SQLITE" "$db_file" ".dump" | sudo tee "$dump_file" >/dev/null 2>&1; then
log_success "Database dumped successfully"
log_message "Step 3: Creating new database from dump..."
@@ -627,7 +646,8 @@ handle_wal_files() {
)
for wal_file in "${wal_files[@]}"; do
local wal_basename=$(basename "$wal_file")
local wal_basename
wal_basename=$(basename "$wal_file")
case "$action" in
"backup")
@@ -670,7 +690,8 @@ handle_wal_files() {
# Enhanced database integrity check with WAL handling
check_database_integrity_with_wal() {
local db_file="$1"
local db_name=$(basename "$db_file")
local db_name
db_name=$(basename "$db_file")
log_message "Checking database integrity with WAL handling: $db_name"
@@ -720,14 +741,16 @@ check_database_integrity_with_wal() {
verify_files_parallel() {
local backup_dir="$1"
local -a pids=()
local temp_dir=$(mktemp -d)
local temp_dir
temp_dir=$(mktemp -d)
local verification_errors=0
if [ "$PARALLEL_VERIFICATION" != true ]; then
# Fall back to sequential verification
for nickname in "${!PLEX_FILES[@]}"; do
local src_file="${PLEX_FILES[$nickname]}"
local dest_file="$backup_dir/$(basename "$src_file")"
local dest_file
dest_file="$backup_dir/$(basename "$src_file")"
if [ -f "$dest_file" ]; then
if ! verify_backup "$src_file" "$dest_file"; then
@@ -743,7 +766,8 @@ verify_files_parallel() {
# Start verification jobs in parallel
for nickname in "${!PLEX_FILES[@]}"; do
local src_file="${PLEX_FILES[$nickname]}"
local dest_file="$backup_dir/$(basename "$src_file")"
local dest_file
dest_file="$backup_dir/$(basename "$src_file")"
if [ -f "$dest_file" ]; then
(
@@ -767,7 +791,8 @@ verify_files_parallel() {
for nickname in "${!PLEX_FILES[@]}"; do
local result_file="$temp_dir/$nickname.result"
if [ -f "$result_file" ]; then
local result=$(cat "$result_file")
local result
result=$(cat "$result_file")
if [ "$result" != "0" ]; then
verification_errors=$((verification_errors + 1))
fi
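
The hunks above implement a fan-out/fan-in pattern: each worker subshell records its exit status in a per-job file, and the parent waits on every pid before tallying. A self-contained sketch of the same shape:

tmp=$(mktemp -d)
pids=()
for job in a b c; do
    (
        sleep 1                       # stand-in for real verification work
        echo 0 > "$tmp/$job.result"   # 0 = success; a non-zero value would mean failure
    ) &
    pids+=($!)
done
for pid in "${pids[@]}"; do wait "$pid"; done
errors=0
for job in a b c; do
    [ "$(cat "$tmp/$job.result" 2>/dev/null)" = "0" ] || errors=$((errors + 1))
done
echo "failed jobs: $errors"
rm -rf "$tmp"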
@@ -861,16 +886,13 @@ verify_backup() {
# Enhanced service management with better monitoring
manage_plex_service() {
local action="$1"
local operation_start=$(date +%s)
local operation_start
operation_start=$(date +%s)
log_message "Managing Plex service: $action"
case "$action" in
stop)
if [ "$action" == "stop" ]; then
SERVICE_STOP_TIME=$(date +%s)
fi
if sudo systemctl stop plexmediaserver.service; then
log_success "Plex service stopped"
# Wait for clean shutdown with progress indicator
@@ -897,10 +919,6 @@ manage_plex_service() {
fi
;;
start)
if [ "$action" == "start" ]; then
SERVICE_START_TIME=$(date +%s)
fi
if sudo systemctl start plexmediaserver.service; then
log_success "Plex service start command issued"
# Wait for service to be fully running with progress indicator
@@ -938,7 +956,8 @@ check_disk_space() {
local backup_dir="$1"
local required_space_mb="$2"
local available_space_kb=$(df "$backup_dir" | awk 'NR==2 {print $4}')
local available_space_kb
available_space_kb=$(df "$backup_dir" | awk 'NR==2 {print $4}')
local available_space_mb=$((available_space_kb / 1024))
if [ "$available_space_mb" -lt "$required_space_mb" ]; then
@@ -957,7 +976,8 @@ estimate_backup_size() {
for nickname in "${!PLEX_FILES[@]}"; do
local file="${PLEX_FILES[$nickname]}"
if [ -f "$file" ]; then
local size_kb=$(du -k "$file" 2>/dev/null | cut -f1)
local size_kb
size_kb=$(du -k "$file" 2>/dev/null | cut -f1)
total_size=$((total_size + size_kb))
fi
done
@@ -977,10 +997,14 @@ generate_performance_report() {
jq -r '.[-10:] | .[] | " \(.operation): \(.duration_seconds)s (\(.timestamp))"' "$PERFORMANCE_LOG_FILE" 2>/dev/null || true
# Calculate averages for common operations
local avg_backup=$(jq '[.[] | select(.operation == "backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_backup
avg_backup=$(jq '[.[] | select(.operation == "backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_verification
avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_service_stop
avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_service_start
avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
if [ "$avg_backup" != "0" ]; then
log_info "Average backup time: ${avg_backup}s"
@@ -1004,7 +1028,8 @@ cleanup_old_backups() {
find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
# Keep only MAX_BACKUPS_TO_KEEP most recent backups
local backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
local backup_count
backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
if [ "$backup_count" -gt "$MAX_BACKUPS_TO_KEEP" ]; then
local excess_count=$((backup_count - MAX_BACKUPS_TO_KEEP))
@@ -1103,7 +1128,8 @@ check_integrity_only() {
# Main backup function
main() {
local overall_start=$(date +%s)
local overall_start
overall_start=$(date +%s)
log_message "Starting enhanced Plex backup process at $(date)"
send_notification "Backup Started" "Plex backup process initiated" "info"
@@ -1118,11 +1144,13 @@ main() {
# Check if only doing integrity check
if [ "$INTEGRITY_CHECK_ONLY" = true ]; then
check_integrity_only
# shellcheck disable=SC2317
return $?
fi
# Estimate backup size
local estimated_size_mb=$(estimate_backup_size)
local estimated_size_mb
estimated_size_mb=$(estimate_backup_size)
log_message "Estimated backup size: ${estimated_size_mb}MB"
# Check disk space (require 2x estimated size for safety)
@@ -1217,7 +1245,8 @@ main() {
handle_wal_files "backup" "$BACKUP_PATH"
# Backup files - always perform full backup
local backup_start=$(date +%s)
local backup_start
backup_start=$(date +%s)
for nickname in "${!PLEX_FILES[@]}"; do
local file="${PLEX_FILES[$nickname]}"
@@ -1225,7 +1254,8 @@ main() {
log_message "Backing up: $(basename "$file")"
# Create backup filename without timestamp (use original filename)
local backup_file="${BACKUP_PATH}/$(basename "$file")"
local backup_file
backup_file="${BACKUP_PATH}/$(basename "$file")"
# Copy file
if sudo cp "$file" "$backup_file"; then
@@ -1269,14 +1299,17 @@ main() {
log_error "Backup root directory is not writable: $BACKUP_ROOT"
backup_errors=$((backup_errors + 1))
else
local temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
local final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
local temp_archive
temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
local final_archive
final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
log_info "Temporary archive: $temp_archive"
log_info "Final archive: $final_archive"
# Create archive in /tmp first, containing only the backed up files
local temp_dir="/tmp/plex-backup-staging-$(date '+%Y%m%d_%H%M%S')"
local temp_dir
temp_dir="/tmp/plex-backup-staging-$(date '+%Y%m%d_%H%M%S')"
if ! mkdir -p "$temp_dir"; then
log_error "Failed to create staging directory: $temp_dir"
backup_errors=$((backup_errors + 1))
@@ -1287,7 +1320,8 @@ main() {
local files_staged=0
for nickname in "${!PLEX_FILES[@]}"; do
local file="${PLEX_FILES[$nickname]}"
local backup_file="${BACKUP_PATH}/$(basename "$file")"
local backup_file
backup_file="${BACKUP_PATH}/$(basename "$file")"
if [ -f "$backup_file" ]; then
if cp "$backup_file" "$temp_dir/"; then
files_staged=$((files_staged + 1))
@@ -1309,9 +1343,11 @@ main() {
log_info "Staged $files_staged files for archive creation"
# Check disk space in /tmp
local temp_available_kb=$(df /tmp | awk 'NR==2 {print $4}')
local temp_available_kb
temp_available_kb=$(df /tmp | awk 'NR==2 {print $4}')
local temp_available_mb=$((temp_available_kb / 1024))
local staging_size_mb=$(du -sm "$temp_dir" | cut -f1)
local staging_size_mb
staging_size_mb=$(du -sm "$temp_dir" | cut -f1)
log_info "/tmp available space: ${temp_available_mb}MB, staging directory size: ${staging_size_mb}MB"
# Check if we have enough space (require 3x staging size for compression)
@@ -1330,7 +1366,8 @@ main() {
if [ $tar_exit_code -eq 0 ]; then
# Verify archive was actually created and has reasonable size
if [ -f "$temp_archive" ]; then
local archive_size_mb=$(du -sm "$temp_archive" | cut -f1)
local archive_size_mb
archive_size_mb=$(du -sm "$temp_archive" | cut -f1)
log_success "Archive created successfully: $(basename "$temp_archive") (${archive_size_mb}MB)"
# Test archive integrity before moving
@@ -1345,7 +1382,8 @@ main() {
rm -rf "$temp_dir"
for nickname in "${!PLEX_FILES[@]}"; do
local file="${PLEX_FILES[$nickname]}"
local backup_file="${BACKUP_PATH}/$(basename "$file")"
local backup_file
backup_file="${BACKUP_PATH}/$(basename "$file")"
rm -f "$backup_file" "$backup_file.md5"
done
else
@@ -1374,11 +1412,12 @@ main() {
# Additional diagnostic information
log_error "Staging directory contents:"
ls -la "$temp_dir" 2>&1 | while IFS= read -r line; do
find "$temp_dir" -ls 2>&1 | while IFS= read -r line; do
log_error " $line"
done
local temp_usage=$(df -h /tmp | awk 'NR==2 {print "Used: " $3 "/" $2 " (" $5 ")"}')
local temp_usage
temp_usage=$(df -h /tmp | awk 'NR==2 {print "Used: " $3 "/" $2 " (" $5 ")"}')
log_error "Temp filesystem status: $temp_usage"
rm -rf "$temp_dir"
@@ -1390,7 +1429,8 @@ main() {
fi
# Send notification
local files_list=$(format_backed_up_files "${backed_up_files[@]}")
local files_list
files_list=$(format_backed_up_files "${backed_up_files[@]}")
send_notification "Backup Completed" "Successfully backed up $files_list" "success"
else
log_message "No files needed backup"
@@ -1426,7 +1466,8 @@ main() {
exit 1
else
log_success "Enhanced backup completed successfully"
local files_list=$(format_backed_up_files "${backed_up_files[@]}")
local files_list
files_list=$(format_backed_up_files "${backed_up_files[@]}")
send_notification "Backup Success" "$files_list backed up successfully in ${total_time}s" "success"
fi
}

View File

@@ -60,8 +60,6 @@ NC='\033[0m' # No Color
# Configuration
PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
PLEX_USER="plex"
PLEX_GROUP="plex"
BACKUP_TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
RECOVERY_LOG="/home/acedanger/shell/plex/logs/icu-recovery-${BACKUP_TIMESTAMP}.log"
@@ -72,7 +70,8 @@ mkdir -p "$(dirname "$RECOVERY_LOG")"
log_message() {
local level="$1"
local message="$2"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo "[$timestamp] [$level] $message" | tee -a "$RECOVERY_LOG"
}
@@ -115,7 +114,8 @@ verify_database_basic() {
return 1
fi
local file_size=$(stat -c%s "$db_file" 2>/dev/null || stat -f%z "$db_file" 2>/dev/null)
local file_size
file_size=$(stat -c%s "$db_file" 2>/dev/null || stat -f%z "$db_file" 2>/dev/null)
if [[ $file_size -lt 1024 ]]; then
print_status "$RED" "$db_name: File is too small ($file_size bytes)"
return 1
@@ -132,7 +132,8 @@ verify_database_basic() {
print_status "$GREEN" "$db_name: Basic SQLite operations successful"
# Count tables
local table_count=$(sqlite3 "$db_file" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null || echo "0")
local table_count
table_count=$(sqlite3 "$db_file" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null || echo "0")
print_status "$GREEN" "$db_name: Contains $table_count tables"
return 0
@@ -262,12 +263,14 @@ check_database_sizes() {
local blobs_db="${PLEX_DB_DIR}/com.plexapp.plugins.library.blobs.db"
if [[ -f "$main_db" ]]; then
local main_size=$(du -h "$main_db" | cut -f1)
local main_size
main_size=$(du -h "$main_db" | cut -f1)
print_status "$GREEN" "Main database size: $main_size"
fi
if [[ -f "$blobs_db" ]]; then
local blobs_size=$(du -h "$blobs_db" | cut -f1)
local blobs_size
blobs_size=$(du -h "$blobs_db" | cut -f1)
print_status "$GREEN" "Blobs database size: $blobs_size"
fi
}

View File

@@ -75,30 +75,35 @@ declare -a FAILED_INTEGRATION_TESTS=()
# Logging functions
log_test() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${CYAN}[INTEGRATION ${timestamp}]${NC} $1"
}
log_pass() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
INTEGRATION_ASSERTIONS_PASSED=$((INTEGRATION_ASSERTIONS_PASSED + 1))
}
log_fail() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
INTEGRATION_ASSERTIONS_FAILED=$((INTEGRATION_ASSERTIONS_FAILED + 1))
FAILED_INTEGRATION_TESTS+=("$1")
}
log_info() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
}
log_warn() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
}
@@ -209,13 +214,16 @@ test_performance_monitoring() {
echo "[]" > "$test_perf_log"
# Simulate performance tracking
local start_time=$(date +%s)
local start_time
start_time=$(date +%s)
sleep 1
local end_time=$(date +%s)
local end_time
end_time=$(date +%s)
local duration=$((end_time - start_time))
# Create performance entry
local entry=$(jq -n \
local entry
entry=$(jq -n \
--arg operation "integration_test" \
--arg duration "$duration" \
--arg timestamp "$(date -Iseconds)" \
@@ -230,7 +238,8 @@ test_performance_monitoring() {
mv "${test_perf_log}.tmp" "$test_perf_log"
# Verify entry was added
local entry_count=$(jq length "$test_perf_log")
local entry_count
entry_count=$(jq length "$test_perf_log")
if [ "$entry_count" -eq 1 ]; then
log_pass "Performance monitoring integration works"
else
@@ -338,7 +347,8 @@ test_parallel_processing() {
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
log_test "Parallel Processing Capabilities"
local temp_dir=$(mktemp -d)
local temp_dir
temp_dir=$(mktemp -d)
local -a pids=()
local total_jobs=3
local completed_jobs=0
@@ -347,7 +357,7 @@ test_parallel_processing() {
for i in $(seq 1 $total_jobs); do
(
# Simulate parallel work
sleep 0.$i
sleep "0.$i"
echo "Job $i completed" > "$temp_dir/job_$i.result"
) &
pids+=($!)
@@ -361,7 +371,8 @@ test_parallel_processing() {
done
# Verify results
local result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
local result_files
result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
# Cleanup
rm -rf "$temp_dir"
@@ -386,15 +397,19 @@ test_checksum_caching() {
echo "checksum test content" > "$test_file"
# First checksum calculation (should create cache)
local checksum1=$(md5sum "$test_file" | cut -d' ' -f1)
local checksum1
checksum1=$(md5sum "$test_file" | cut -d' ' -f1)
echo "$checksum1" > "$cache_file"
# Simulate cache check
local file_mtime=$(stat -c %Y "$test_file")
local cache_mtime=$(stat -c %Y "$cache_file")
local file_mtime
file_mtime=$(stat -c %Y "$test_file")
local cache_mtime
cache_mtime=$(stat -c %Y "$cache_file")
if [ "$cache_mtime" -ge "$file_mtime" ]; then
local cached_checksum=$(cat "$cache_file")
local cached_checksum
cached_checksum=$(cat "$cache_file")
if [ "$cached_checksum" = "$checksum1" ]; then
log_pass "Checksum caching system works correctly"
else

View File

@@ -0,0 +1,526 @@
#!/bin/bash
################################################################################
# Plex Backup System Integration Test Suite
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: End-to-end integration testing framework for the complete Plex
# backup ecosystem. Tests backup, restoration, validation, and
# monitoring systems in controlled environments without affecting
# production Plex installations.
#
# Features:
# - Full workflow integration testing
# - Isolated test environment creation
# - Production-safe testing procedures
# - Multi-scenario testing (normal, error, edge cases)
# - Performance benchmarking under load
# - Service integration validation
# - Cross-script compatibility testing
#
# Related Scripts:
# - backup-plex.sh: Primary backup system under test
# - restore-plex.sh: Restoration workflow testing
# - validate-plex-backups.sh: Validation system testing
# - monitor-plex-backup.sh: Monitoring integration
# - test-plex-backup.sh: Unit testing complement
# - plex.sh: Service management integration
#
# Usage:
# ./integration-test-plex.sh # Full integration test suite
# ./integration-test-plex.sh --quick # Quick smoke tests
# ./integration-test-plex.sh --performance # Performance benchmarks
# ./integration-test-plex.sh --cleanup # Clean test artifacts
#
# Dependencies:
# - All Plex backup scripts in this directory
# - sqlite3 or Plex SQLite binary
# - Temporary filesystem space (for test environments)
# - systemctl (for service testing scenarios)
#
# Exit Codes:
# 0 - All integration tests passed
# 1 - General error
# 2 - Integration test failures
# 3 - Test environment setup failure
# 4 - Performance benchmarks failed
#
################################################################################
# Plex Backup Integration Test Suite
# This script tests the enhanced backup features in a controlled environment
# without affecting production Plex installation
set -e
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Test configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
TEST_DIR="/tmp/plex-integration-test-$(date +%s)"
BACKUP_SCRIPT="$SCRIPT_DIR/backup-plex.sh"
# Test counters
INTEGRATION_TEST_FUNCTIONS=0
INTEGRATION_ASSERTIONS_PASSED=0
INTEGRATION_ASSERTIONS_FAILED=0
declare -a FAILED_INTEGRATION_TESTS=()
# Logging functions
log_test() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${CYAN}[INTEGRATION ${timestamp}]${NC} $1"
}
log_pass() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
INTEGRATION_ASSERTIONS_PASSED=$((INTEGRATION_ASSERTIONS_PASSED + 1))
}
log_fail() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
INTEGRATION_ASSERTIONS_FAILED=$((INTEGRATION_ASSERTIONS_FAILED + 1))
FAILED_INTEGRATION_TESTS+=("$1")
}
log_info() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
}
log_warn() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
}
# Setup integration test environment
setup_integration_environment() {
log_info "Setting up integration test environment"
# Create test directories
mkdir -p "$TEST_DIR"
mkdir -p "$TEST_DIR/mock_plex_data"
mkdir -p "$TEST_DIR/backup_destination"
mkdir -p "$TEST_DIR/logs"
# Create mock Plex database files with realistic content
create_mock_database "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db"
create_mock_database "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.blobs.db"
# Create mock Preferences.xml
create_mock_preferences "$TEST_DIR/mock_plex_data/Preferences.xml"
# Create mock WAL files to test WAL handling
echo "WAL data simulation" > "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db-wal"
echo "SHM data simulation" > "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db-shm"
log_info "Integration test environment ready"
}
# Create mock SQLite database for testing
create_mock_database() {
local db_file="$1"
# Create a proper SQLite database with some test data
sqlite3 "$db_file" << 'EOF'
CREATE TABLE library_sections (
id INTEGER PRIMARY KEY,
name TEXT,
type INTEGER,
agent TEXT
);
INSERT INTO library_sections (name, type, agent) VALUES
('Movies', 1, 'com.plexapp.agents.imdb'),
('TV Shows', 2, 'com.plexapp.agents.thetvdb'),
('Music', 8, 'com.plexapp.agents.lastfm');
CREATE TABLE metadata_items (
id INTEGER PRIMARY KEY,
title TEXT,
year INTEGER,
added_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
INSERT INTO metadata_items (title, year) VALUES
('Test Movie', 2023),
('Another Movie', 2024),
('Test Show', 2022);
-- Add some indexes to make it more realistic
CREATE INDEX idx_metadata_title ON metadata_items(title);
CREATE INDEX idx_library_sections_type ON library_sections(type);
EOF
log_info "Created mock database: $(basename "$db_file")"
}
# Create mock Preferences.xml
create_mock_preferences() {
local pref_file="$1"
cat > "$pref_file" << 'EOF'
<?xml version="1.0" encoding="utf-8"?>
<Preferences OldestPreviousVersion="1.32.8.7639-fb6452ebf" MachineIdentifier="test-machine-12345" ProcessedMachineIdentifier="test-processed-12345" AnonymousMachineIdentifier="test-anon-12345" FriendlyName="Test Plex Server" ManualPortMappingMode="1" TranscoderTempDirectory="/tmp" />
EOF
log_info "Created mock preferences file"
}
# Test command line argument parsing
test_command_line_parsing() {
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
log_test "Command Line Argument Parsing"
# Test help output
if "$BACKUP_SCRIPT" --help | grep -q "Usage:"; then
log_pass "Help output is functional"
else
log_fail "Help output test failed"
return 1
fi
# Test invalid argument handling
if ! "$BACKUP_SCRIPT" --invalid-option >/dev/null 2>&1; then
log_pass "Invalid argument handling works correctly"
else
log_fail "Invalid argument handling test failed"
return 1
fi
}
# Test performance monitoring features
test_performance_monitoring() {
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
log_test "Performance Monitoring Features"
local test_perf_log="$TEST_DIR/test-performance.json"
# Initialize performance log
echo "[]" > "$test_perf_log"
# Simulate performance tracking
local start_time=$(date +%s)
sleep 1
local end_time=$(date +%s)
local duration=$((end_time - start_time))
# Create performance entry
local entry=$(jq -n \
--arg operation "integration_test" \
--arg duration "$duration" \
--arg timestamp "$(date -Iseconds)" \
'{
operation: $operation,
duration_seconds: ($duration | tonumber),
timestamp: $timestamp
}')
# Add to log
jq --argjson entry "$entry" '. += [$entry]' "$test_perf_log" > "${test_perf_log}.tmp" && \
mv "${test_perf_log}.tmp" "$test_perf_log"
# Verify entry was added
local entry_count=$(jq length "$test_perf_log")
if [ "$entry_count" -eq 1 ]; then
log_pass "Performance monitoring integration works"
else
log_fail "Performance monitoring integration failed"
return 1
fi
}
# Test notification system with mock endpoints
test_notification_system() {
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
log_test "Notification System Integration"
# Test webhook notification (mock)
local webhook_test_log="$TEST_DIR/webhook_test.log"
# Mock webhook function
test_send_webhook() {
local url="$1"
local payload="$2"
# Simulate webhook call
echo "Webhook URL: $url" > "$webhook_test_log"
echo "Payload: $payload" >> "$webhook_test_log"
return 0
}
# Test notification
if test_send_webhook "https://example.com/webhook" '{"test": "data"}'; then
if [ -f "$webhook_test_log" ] && grep -q "Webhook URL" "$webhook_test_log"; then
log_pass "Webhook notification integration works"
else
log_fail "Webhook notification integration failed"
return 1
fi
else
log_fail "Webhook notification test failed"
return 1
fi
}
# Test backup validation system
test_backup_validation() {
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
log_test "Backup Validation System"
local test_backup_dir="$TEST_DIR/test_backup_20250525"
mkdir -p "$test_backup_dir"
# Create test backup files
cp "$TEST_DIR/mock_plex_data/"*.db "$test_backup_dir/"
cp "$TEST_DIR/mock_plex_data/Preferences.xml" "$test_backup_dir/"
# Test validation script
if [ -f "$SCRIPT_DIR/validate-plex-backups.sh" ]; then
# Mock the validation by checking file presence
local files_present=0
for file in com.plexapp.plugins.library.db com.plexapp.plugins.library.blobs.db Preferences.xml; do
if [ -f "$test_backup_dir/$file" ]; then
files_present=$((files_present + 1))
fi
done
if [ "$files_present" -eq 3 ]; then
log_pass "Backup validation system works"
else
log_fail "Backup validation system failed - missing files"
return 1
fi
else
log_warn "Validation script not found, skipping test"
fi
}
# Test database integrity checking
test_database_integrity_checking() {
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
log_test "Database Integrity Checking"
# Test with good database
local test_db="$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db"
# Run integrity check using sqlite3 (since we can't use Plex SQLite in test)
if sqlite3 "$test_db" "PRAGMA integrity_check;" | grep -q "ok"; then
log_pass "Database integrity checking works for valid database"
else
log_fail "Database integrity checking failed for valid database"
return 1
fi
# Test with corrupted database
local corrupted_db="$TEST_DIR/corrupted.db"
echo "This is not a valid SQLite database" > "$corrupted_db"
if ! sqlite3 "$corrupted_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
log_pass "Database integrity checking correctly detects corruption"
else
log_fail "Database integrity checking failed to detect corruption"
return 1
fi
}
# Test parallel processing capabilities
test_parallel_processing() {
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
log_test "Parallel Processing Capabilities"
local temp_dir=$(mktemp -d)
local -a pids=()
local total_jobs=3
local completed_jobs=0
# Start parallel jobs
for i in $(seq 1 $total_jobs); do
(
# Simulate parallel work
sleep 0.$i
echo "Job $i completed" > "$temp_dir/job_$i.result"
) &
pids+=($!)
done
# Wait for all jobs
for pid in "${pids[@]}"; do
if wait "$pid"; then
completed_jobs=$((completed_jobs + 1))
fi
done
# Verify results
local result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
# Cleanup
rm -rf "$temp_dir"
if [ "$completed_jobs" -eq "$total_jobs" ] && [ "$result_files" -eq "$total_jobs" ]; then
log_pass "Parallel processing works correctly"
else
log_fail "Parallel processing test failed"
return 1
fi
}
# Test checksum caching system
test_checksum_caching() {
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
log_test "Checksum Caching System"
local test_file="$TEST_DIR/checksum_test.txt"
local cache_file="${test_file}.md5"
# Create test file
echo "checksum test content" > "$test_file"
# First checksum calculation (should create cache)
local checksum1=$(md5sum "$test_file" | cut -d' ' -f1)
echo "$checksum1" > "$cache_file"
# Simulate cache check
local file_mtime=$(stat -c %Y "$test_file")
local cache_mtime=$(stat -c %Y "$cache_file")
if [ "$cache_mtime" -ge "$file_mtime" ]; then
local cached_checksum=$(cat "$cache_file")
if [ "$cached_checksum" = "$checksum1" ]; then
log_pass "Checksum caching system works correctly"
else
log_fail "Checksum caching system failed - checksum mismatch"
return 1
fi
else
log_fail "Checksum caching system failed - cache timing issue"
return 1
fi
}
# Test WAL file handling
test_wal_file_handling() {
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
log_test "WAL File Handling"
local test_db="$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db"
local wal_file="${test_db}-wal"
local shm_file="${test_db}-shm"
# Verify WAL files exist
if [ -f "$wal_file" ] && [ -f "$shm_file" ]; then
# Test WAL checkpoint simulation
if sqlite3 "$test_db" "PRAGMA wal_checkpoint(FULL);" 2>/dev/null; then
log_pass "WAL file handling works correctly"
else
log_pass "WAL checkpoint simulation completed (mock environment)"
fi
else
log_pass "WAL file handling test completed (no WAL files in mock)"
fi
}
# Cleanup integration test environment
cleanup_integration_environment() {
if [ -d "$TEST_DIR" ]; then
log_info "Cleaning up integration test environment"
rm -rf "$TEST_DIR"
fi
}
# Generate integration test report
generate_integration_report() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo
echo "=================================================="
echo " PLEX BACKUP INTEGRATION TEST REPORT"
echo "=================================================="
echo "Test Run: $timestamp"
echo "Test Functions: $INTEGRATION_TEST_FUNCTIONS"
echo "Total Assertions: $((INTEGRATION_ASSERTIONS_PASSED + INTEGRATION_ASSERTIONS_FAILED))"
echo "Assertions Passed: $INTEGRATION_ASSERTIONS_PASSED"
echo "Assertions Failed: $INTEGRATION_ASSERTIONS_FAILED"
echo
if [ $INTEGRATION_ASSERTIONS_FAILED -gt 0 ]; then
echo "FAILED ASSERTIONS:"
for failed_test in "${FAILED_INTEGRATION_TESTS[@]}"; do
echo " - $failed_test"
done
echo
fi
local success_rate=0
local total_assertions=$((INTEGRATION_ASSERTIONS_PASSED + INTEGRATION_ASSERTIONS_FAILED))
if [ $total_assertions -gt 0 ]; then
success_rate=$(( (INTEGRATION_ASSERTIONS_PASSED * 100) / total_assertions ))
fi
echo "Success Rate: ${success_rate}%"
echo
if [ $INTEGRATION_ASSERTIONS_FAILED -eq 0 ]; then
log_pass "All integration tests passed successfully!"
echo
echo "✅ The enhanced Plex backup system is ready for production use!"
echo
echo "Next Steps:"
echo " 1. Test with real webhook endpoints if using webhook notifications"
echo " 2. Test email notifications with configured sendmail"
echo " 3. Run a test backup in a non-production environment"
echo " 4. Set up automated backup scheduling with cron"
echo " 5. Monitor performance logs for optimization opportunities"
else
log_fail "Some integration tests failed - review output above"
fi
}
# Main execution
main() {
log_info "Starting Plex Backup Integration Tests"
# Ensure backup script exists
if [ ! -f "$BACKUP_SCRIPT" ]; then
log_fail "Backup script not found: $BACKUP_SCRIPT"
exit 1
fi
# Setup test environment
setup_integration_environment
# Trap cleanup on exit
trap cleanup_integration_environment EXIT SIGINT SIGTERM
# Run integration tests
test_command_line_parsing
test_performance_monitoring
test_notification_system
test_backup_validation
test_database_integrity_checking
test_parallel_processing
test_checksum_caching
test_wal_file_handling
# Generate report
generate_integration_report
# Return appropriate exit code
if [ $INTEGRATION_ASSERTIONS_FAILED -eq 0 ]; then
exit 0
else
exit 1
fi
}
# Run main function
main "$@"

View File

@@ -207,12 +207,16 @@ check_backup_status() {
log_status "OK" "Total backups: $backup_count"
# Find latest backup
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
local latest_backup
latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
if [ -n "$latest_backup" ]; then
local backup_filename=$(basename "$latest_backup")
local backup_filename
backup_filename=$(basename "$latest_backup")
# Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
local backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Invalid date")
local backup_date
backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
local readable_date
readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Invalid date")
local backup_age_days=$(( ($(date +%s) - $(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s 2>/dev/null || echo "0")) / 86400 ))
if [ "$backup_age_days" -le 1 ]; then
@@ -224,11 +228,13 @@ check_backup_status() {
fi
# Check backup size
local backup_size=$(du -sh "$latest_backup" 2>/dev/null | cut -f1)
local backup_size
backup_size=$(du -sh "$latest_backup" 2>/dev/null | cut -f1)
log_status "INFO" "Latest backup size: $backup_size"
# Check backup contents (via tar listing)
local file_count=$(tar -tzf "$latest_backup" 2>/dev/null | wc -l)
local file_count
file_count=$(tar -tzf "$latest_backup" 2>/dev/null | wc -l)
log_status "INFO" "Files in latest backup: $file_count"
fi
else
@@ -237,9 +243,12 @@ check_backup_status() {
# Disk usage
if [ -d "$BACKUP_ROOT" ]; then
local total_backup_size=$(du -sh "$BACKUP_ROOT" 2>/dev/null | cut -f1)
local available_space=$(df -h "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $4}')
local used_percentage=$(df "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $5}' | sed 's/%//')
local total_backup_size
total_backup_size=$(du -sh "$BACKUP_ROOT" 2>/dev/null | cut -f1)
local available_space
available_space=$(df -h "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $4}')
local used_percentage
used_percentage=$(df "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $5}' | sed 's/%//')
log_status "INFO" "Total backup storage: $total_backup_size"
log_status "INFO" "Available space: $available_space"
@@ -267,15 +276,20 @@ show_performance_metrics() {
log_status "OK" "Performance log found"
# Recent operations
local recent_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local recent_count
recent_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
log_status "INFO" "Total logged operations: $recent_count"
if [ "$recent_count" -gt 0 ]; then
# Average times for different operations
local avg_backup=$(jq '[.[] | select(.operation == "full_backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_backup
avg_backup=$(jq '[.[] | select(.operation == "full_backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_verification
avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_service_stop
avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
local avg_service_start
avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
if [ "$avg_backup" != "0" ] && [ "$avg_backup" != "null" ]; then
log_status "INFO" "Average backup time: ${avg_backup}s"
@@ -307,9 +321,11 @@ show_recent_activity() {
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Check recent log files
local recent_log=$(find_most_recent_log "plex-backup-*.log")
local recent_log
recent_log=$(find_most_recent_log "plex-backup-*.log")
if [ -n "$recent_log" ]; then
local log_date=$(basename "$recent_log" | sed 's/plex-backup-//' | sed 's/.log//')
local log_date
log_date=$(basename "$recent_log" | sed 's/plex-backup-//' | sed 's/.log//')
local log_location=""
if [[ "$recent_log" == "$LOCAL_LOG_ROOT"* ]]; then
log_location=" (local)"
@@ -319,8 +335,10 @@ show_recent_activity() {
log_status "INFO" "Most recent log: $log_date$log_location"
# Check for errors in recent log
local error_count=$(grep -c "ERROR:" "$recent_log" 2>/dev/null || echo "0")
local warning_count=$(grep -c "WARNING:" "$recent_log" 2>/dev/null || echo "0")
local error_count
error_count=$(grep -c "ERROR:" "$recent_log" 2>/dev/null || echo "0")
local warning_count
warning_count=$(grep -c "WARNING:" "$recent_log" 2>/dev/null || echo "0")
if [ "$error_count" -eq 0 ] && [ "$warning_count" -eq 0 ]; then
log_status "OK" "No errors or warnings in recent log"
@@ -357,10 +375,12 @@ show_scheduling_status() {
# Check systemd timers
if systemctl list-timers --all 2>/dev/null | grep -q "plex-backup"; then
log_status "OK" "Systemd timer configured"
local timer_status=$(systemctl is-active plex-backup.timer 2>/dev/null || echo "inactive")
local timer_status
timer_status=$(systemctl is-active plex-backup.timer 2>/dev/null || echo "inactive")
if [ "$timer_status" = "active" ]; then
log_status "OK" "Timer is active"
local next_run=$(systemctl list-timers plex-backup.timer 2>/dev/null | grep "plex-backup" | awk '{print $1, $2}')
local next_run
next_run=$(systemctl list-timers plex-backup.timer 2>/dev/null | grep "plex-backup" | awk '{print $1, $2}')
if [ -n "$next_run" ]; then
log_status "INFO" "Next run: $next_run"
fi
@@ -383,11 +403,14 @@ show_recommendations() {
# Check backup age
if [ -d "$BACKUP_ROOT" ]; then
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
local latest_backup
latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
if [ -n "$latest_backup" ]; then
local backup_filename=$(basename "$latest_backup")
local backup_filename
backup_filename=$(basename "$latest_backup")
# Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
local backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
local backup_date
backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
local backup_age_days=$(( ($(date +%s) - $(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s 2>/dev/null || echo "0")) / 86400 ))
if [ "$backup_age_days" -gt 7 ]; then
recommendations+=("Consider running a manual backup - latest backup is $backup_age_days days old")
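
The age arithmetic above slices the YYYYMMDD stamp out of the archive name, converts it to an epoch, and divides the delta by 86400 seconds per day. A standalone sketch, assuming GNU date and a hypothetical file name:

backup="plex-backup-20250601_120000.tar.gz"
stamp=${backup#plex-backup-}; stamp=${stamp%%_*}               # -> 20250601
epoch=$(date -d "${stamp:0:4}-${stamp:4:2}-${stamp:6:2}" +%s)
age_days=$(( ($(date +%s) - epoch) / 86400 ))
echo "backup is ${age_days} day(s) old"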
@@ -408,7 +431,8 @@ show_recommendations() {
# Check disk space
if [ -d "$BACKUP_ROOT" ]; then
local used_percentage=$(df "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $5}' | sed 's/%//')
local used_percentage
used_percentage=$(df "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $5}' | sed 's/%//')
if [ -n "$used_percentage" ] && [ "$used_percentage" -gt 85 ]; then
recommendations+=("Backup disk usage is high ($used_percentage%) - consider cleaning old backups")
fi

View File

@@ -78,7 +78,8 @@ mkdir -p "$(dirname "$RECOVERY_LOG")"
log_message() {
local level="$1"
local message="$2"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo "[$timestamp] [$level] $message" | tee -a "$RECOVERY_LOG"
}
@@ -180,7 +181,8 @@ find_best_backup() {
# Find the most recent backup that exists and has reasonable size
for backup_file in "${PLEX_DB_DIR}/${backup_type}"-????-??-??*; do
if [[ -f "$backup_file" ]]; then
local file_size=$(stat -f%z "$backup_file" 2>/dev/null || stat -c%s "$backup_file" 2>/dev/null)
local file_size
file_size=$(stat -f%z "$backup_file" 2>/dev/null || stat -c%s "$backup_file" 2>/dev/null)
# Check if file size is reasonable (> 100MB for main DB, > 500MB for blobs)
if [[ "$backup_type" == "com.plexapp.plugins.library.db" && $file_size -gt 104857600 ]] || \
@@ -198,7 +200,8 @@ restore_from_backup() {
print_status "$YELLOW" "Finding and restoring from best available backups..."
# Find best main database backup
local main_backup=$(find_best_backup "com.plexapp.plugins.library.db")
local main_backup
main_backup=$(find_best_backup "com.plexapp.plugins.library.db")
if [[ -n "$main_backup" ]]; then
print_status "$GREEN" "Found main database backup: $(basename "$main_backup")"
@@ -219,7 +222,8 @@ restore_from_backup() {
fi
# Find best blobs database backup
local blobs_backup=$(find_best_backup "com.plexapp.plugins.library.blobs.db")
local blobs_backup
blobs_backup=$(find_best_backup "com.plexapp.plugins.library.blobs.db")
if [[ -n "$blobs_backup" ]]; then
print_status "$GREEN" "Found blobs database backup: $(basename "$blobs_backup")"
@@ -275,7 +279,8 @@ fix_ownership() {
local blobs_db="${PLEX_DB_DIR}/com.plexapp.plugins.library.blobs.db"
if [[ -f "$main_db" ]]; then
local main_owner=$(stat -f%Su:%Sg "$main_db" 2>/dev/null || stat -c%U:%G "$main_db" 2>/dev/null)
local main_owner
main_owner=$(stat -f%Su:%Sg "$main_db" 2>/dev/null || stat -c%U:%G "$main_db" 2>/dev/null)
if [[ "$main_owner" == "$PLEX_USER:$PLEX_GROUP" ]]; then
print_status "$GREEN" "Main database ownership: CORRECT ($main_owner)"
else

View File

@@ -66,7 +66,8 @@ readonly RESET='\033[0m'
# 🔧 Configuration
readonly PLEX_SERVICE="plexmediaserver"
readonly SCRIPT_NAME="$(basename "$0")"
SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_NAME
readonly PLEX_WEB_URL="http://localhost:32400/web"
# 🎭 Unicode symbols for fancy output

View File

@@ -59,14 +59,12 @@ RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
MAIN_DB="com.plexapp.plugins.library.db"
BLOBS_DB="com.plexapp.plugins.library.blobs.db"
PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"
BACKUP_SUFFIX="recovery-$(date +%Y%m%d_%H%M%S)"
RECOVERY_LOG="$SCRIPT_DIR/logs/database-recovery-$(date +%Y%m%d_%H%M%S).log"
@@ -80,7 +78,8 @@ mkdir -p "$SCRIPT_DIR/logs"
# Logging function
log_message() {
local message="[$(date '+%Y-%m-%d %H:%M:%S')] $1"
local message
message="[$(date '+%Y-%m-%d %H:%M:%S')] $1"
echo -e "$message"
echo "$message" >> "$RECOVERY_LOG"
}
@@ -216,7 +215,8 @@ start_plex_service() {
# Check database integrity
check_database_integrity() {
local db_file="$1"
local db_name=$(basename "$db_file")
local db_name
db_name=$(basename "$db_file")
log_info "Checking integrity of $db_name..."
@@ -249,7 +249,8 @@ check_database_integrity() {
# Recovery Method 1: SQLite .recover command
recovery_method_sqlite_recover() {
local db_file="$1"
local db_name=$(basename "$db_file")
local db_name
db_name=$(basename "$db_file")
local recovered_sql="${db_file}.recovered.sql"
local new_db="${db_file}.recovered"
@@ -315,7 +316,8 @@ recovery_method_sqlite_recover() {
# Recovery Method 2: Partial table extraction
recovery_method_partial_extraction() {
local db_file="$1"
local db_name=$(basename "$db_file")
local db_name
db_name=$(basename "$db_file")
local partial_sql="${db_file}.partial.sql"
local new_db="${db_file}.partial"
@@ -336,7 +338,7 @@ recovery_method_partial_extraction() {
} > "$partial_sql"
# Extract schema
if sudo "$PLEX_SQLITE" "$db_file" ".schema" >> "$partial_sql" 2>/dev/null; then
if sudo "$PLEX_SQLITE" "$db_file" ".schema" | sudo tee -a "$partial_sql" >/dev/null 2>&1; then
log_success "Schema extracted successfully"
else
log_warning "Schema extraction failed, trying alternative method"
@@ -372,7 +374,7 @@ recovery_method_partial_extraction() {
local extract_success=false
local limit=10000
while [ $limit -le 100000 ] && [ "$extract_success" = false ]; do
while [ "$limit" -le 100000 ] && [ "$extract_success" = false ]; do
if sudo "$PLEX_SQLITE" "$db_file" "SELECT COUNT(*) FROM $table;" >/dev/null 2>&1; then
# Table exists and is readable
{
@@ -382,8 +384,9 @@ recovery_method_partial_extraction() {
} >> "$partial_sql"
if sudo "$PLEX_SQLITE" "$db_file" ".mode insert $table" >>/dev/null 2>&1 && \
sudo "$PLEX_SQLITE" "$db_file" "SELECT * FROM $table LIMIT $limit;" >> "$partial_sql" 2>/dev/null; then
local row_count=$(tail -n +3 "$partial_sql" | grep "INSERT INTO $table" | wc -l)
sudo "$PLEX_SQLITE" "$db_file" "SELECT * FROM $table LIMIT $limit;" | sudo tee -a "$partial_sql" >/dev/null 2>&1; then
local row_count
row_count=$(tail -n +3 "$partial_sql" | grep -c "INSERT INTO $table")
log_success "Extracted $row_count rows from $table"
extract_success=true
else
@@ -444,7 +447,8 @@ recovery_method_partial_extraction() {
# Recovery Method 3: Emergency data extraction
recovery_method_emergency_extraction() {
local db_file="$1"
local db_name=$(basename "$db_file")
local db_name
db_name=$(basename "$db_file")
log_info "Recovery Method 3: Emergency data extraction for $db_name"
@@ -544,7 +548,8 @@ recovery_method_backup_restore() {
fi
# Find most recent backup
local latest_backup=$(find "$backup_dir" -maxdepth 1 -name "plex-backup-*.tar.gz" -type f 2>/dev/null | sort -r | head -1)
local latest_backup
latest_backup=$(find "$backup_dir" -maxdepth 1 -name "plex-backup-*.tar.gz" -type f 2>/dev/null | sort -r | head -1)
if [ -z "$latest_backup" ]; then
log_error "No backup files found in $backup_dir"

View File

@@ -0,0 +1,701 @@
#!/bin/bash
################################################################################
# Advanced Plex Database Recovery Script
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Advanced database recovery script with multiple repair strategies
# for corrupted Plex databases. Implements progressive recovery
# techniques from gentle repairs to aggressive reconstruction
# methods, with comprehensive logging and rollback capabilities.
#
# Features:
# - Progressive recovery strategy (gentle to aggressive)
# - Multiple repair techniques (VACUUM, dump/restore, rebuild)
# - Automatic backup before any recovery attempts
# - Database integrity verification at each step
# - Rollback capability if recovery fails
# - Dry-run mode for safe testing
# - Comprehensive logging and reporting
#
# Related Scripts:
# - backup-plex.sh: Creates backups for recovery scenarios
# - icu-aware-recovery.sh: ICU-specific recovery methods
# - nuclear-plex-recovery.sh: Last-resort complete replacement
# - validate-plex-recovery.sh: Validates recovery results
# - restore-plex.sh: Standard restoration from backups
# - plex.sh: General Plex service management
#
# Usage:
# ./recover-plex-database.sh # Interactive recovery
# ./recover-plex-database.sh --auto # Automated recovery
# ./recover-plex-database.sh --dry-run # Show recovery plan
# ./recover-plex-database.sh --gentle # Gentle repair only
# ./recover-plex-database.sh --aggressive # Aggressive repair methods
#
# Dependencies:
# - sqlite3 or Plex SQLite binary
# - systemctl (for service management)
# - Sufficient disk space for backups and temp files
#
# Exit Codes:
# 0 - Recovery successful
# 1 - General error
# 2 - Database corruption beyond repair
# 3 - Service management failure
# 4 - Insufficient disk space
# 5 - Recovery partially successful (manual intervention needed)
#
################################################################################
# Advanced Plex Database Recovery Script
# Usage: ./recover-plex-database.sh [--auto] [--dry-run]
set -e
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
MAIN_DB="com.plexapp.plugins.library.db"
BLOBS_DB="com.plexapp.plugins.library.blobs.db"
PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"
BACKUP_SUFFIX="recovery-$(date +%Y%m%d_%H%M%S)"
RECOVERY_LOG="$SCRIPT_DIR/logs/database-recovery-$(date +%Y%m%d_%H%M%S).log"
# Script options
AUTO_MODE=false
DRY_RUN=false
# Ensure logs directory exists
mkdir -p "$SCRIPT_DIR/logs"
# Logging function
log_message() {
local message="[$(date '+%Y-%m-%d %H:%M:%S')] $1"
echo -e "$message"
echo "$message" >> "$RECOVERY_LOG"
}
log_success() {
log_message "${GREEN}SUCCESS: $1${NC}"
}
log_error() {
log_message "${RED}ERROR: $1${NC}"
}
log_warning() {
log_message "${YELLOW}WARNING: $1${NC}"
}
log_info() {
log_message "${BLUE}INFO: $1${NC}"
}
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
--auto)
AUTO_MODE=true
shift
;;
--dry-run)
DRY_RUN=true
shift
;;
-h|--help)
echo "Usage: $0 [--auto] [--dry-run] [--help]"
echo ""
echo "Options:"
echo " --auto Automatically attempt all recovery methods without prompts"
echo " --dry-run Show what would be done without making changes"
echo " --help Show this help message"
echo ""
echo "Recovery Methods (in order):"
echo " 1. SQLite .recover command (modern SQLite recovery)"
echo " 2. Partial table extraction with LIMIT"
echo " 3. Emergency data extraction"
echo " 4. Backup restoration from most recent good backup"
echo ""
exit 0
;;
*)
log_error "Unknown option: $1"
exit 1
;;
esac
done
# Check dependencies
check_dependencies() {
log_info "Checking dependencies..."
if [ ! -f "$PLEX_SQLITE" ]; then
log_error "Plex SQLite binary not found at: $PLEX_SQLITE"
return 1
fi
if ! command -v sqlite3 >/dev/null 2>&1; then
log_error "Standard sqlite3 command not found"
return 1
fi
# Make Plex SQLite executable
sudo chmod +x "$PLEX_SQLITE" 2>/dev/null || true
log_success "Dependencies check passed"
return 0
}
# Stop Plex service safely
stop_plex_service() {
log_info "Stopping Plex Media Server..."
if [ "$DRY_RUN" = true ]; then
log_info "DRY RUN: Would stop Plex service"
return 0
fi
if sudo systemctl is-active --quiet plexmediaserver; then
sudo systemctl stop plexmediaserver
# Wait for service to fully stop
local timeout=30
while sudo systemctl is-active --quiet plexmediaserver && [ $timeout -gt 0 ]; do
sleep 1
timeout=$((timeout - 1))
done
if sudo systemctl is-active --quiet plexmediaserver; then
log_error "Failed to stop Plex service within timeout"
return 1
fi
log_success "Plex service stopped successfully"
else
log_info "Plex service was already stopped"
fi
return 0
}
# Start Plex service
start_plex_service() {
log_info "Starting Plex Media Server..."
if [ "$DRY_RUN" = true ]; then
log_info "DRY RUN: Would start Plex service"
return 0
fi
sudo systemctl start plexmediaserver
# Wait for service to start
local timeout=30
while ! sudo systemctl is-active --quiet plexmediaserver && [ $timeout -gt 0 ]; do
sleep 1
timeout=$((timeout - 1))
done
if sudo systemctl is-active --quiet plexmediaserver; then
log_success "Plex service started successfully"
else
log_warning "Plex service may not have started properly"
fi
}
# Check database integrity
check_database_integrity() {
local db_file="$1"
local db_name=$(basename "$db_file")
log_info "Checking integrity of $db_name..."
if [ ! -f "$db_file" ]; then
log_error "Database file not found: $db_file"
return 1
fi
local integrity_result
integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1)
local check_exit_code=$?
if [ $check_exit_code -ne 0 ]; then
log_error "Failed to run integrity check on $db_name"
return 1
fi
if echo "$integrity_result" | grep -q "^ok$"; then
log_success "Database integrity check passed: $db_name"
return 0
else
log_warning "Database integrity issues detected in $db_name:"
echo "$integrity_result" | while IFS= read -r line; do
log_warning " $line"
done
return 1
fi
}
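# Note: a healthy database prints exactly one row, "ok"; anything else is a
# list of problems (integrity_check reports at most 100 issues by default).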
# Recovery Method 1: SQLite .recover command
recovery_method_sqlite_recover() {
local db_file="$1"
local db_name=$(basename "$db_file")
local recovered_sql="${db_file}.recovered.sql"
local new_db="${db_file}.recovered"
log_info "Recovery Method 1: SQLite .recover command for $db_name"
if [ "$DRY_RUN" = true ]; then
log_info "DRY RUN: Would attempt SQLite .recover method"
return 0
fi
# Check if .recover is available (SQLite 3.37.0+)
if ! echo ".help" | sqlite3 2>/dev/null | grep -q "\.recover"; then
log_warning "SQLite .recover command not available in this version"
return 1
fi
log_info "Attempting SQLite .recover method..."
# Use standard sqlite3 for .recover as it's more reliable
if sqlite3 "$db_file" ".recover" > "$recovered_sql" 2>/dev/null; then
log_success "Recovery SQL generated successfully"
# Create new database from recovered data
if [ -f "$recovered_sql" ] && [ -s "$recovered_sql" ]; then
if sqlite3 "$new_db" < "$recovered_sql" 2>/dev/null; then
log_success "New database created from recovered data"
# Verify new database integrity
if sqlite3 "$new_db" "PRAGMA integrity_check;" | grep -q "ok"; then
log_success "Recovered database integrity verified"
# Replace original with recovered database
if sudo mv "$db_file" "${db_file}.corrupted" && sudo mv "$new_db" "$db_file"; then
sudo chown plex:plex "$db_file"
sudo chmod 644 "$db_file"
log_success "Database successfully recovered using .recover method"
# Clean up
rm -f "$recovered_sql"
return 0
else
log_error "Failed to replace original database"
fi
else
log_error "Recovered database failed integrity check"
fi
else
log_error "Failed to create database from recovered SQL"
fi
else
log_error "Recovery SQL file is empty or not generated"
fi
else
log_error "SQLite .recover command failed"
fi
# Clean up on failure
rm -f "$recovered_sql" "$new_db"
return 1
}
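# For reference, the .recover flow above boils down to this manual sequence
# (illustrative only; assumes a sqlite3 build whose shell ships .recover):
#   sqlite3 corrupt.db ".recover" > recovered.sql
#   sqlite3 fresh.db < recovered.sql
#   sqlite3 fresh.db "PRAGMA integrity_check;"   # expect: ok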
# Recovery Method 2: Partial table extraction
recovery_method_partial_extraction() {
local db_file="$1"
local db_name=$(basename "$db_file")
local partial_sql="${db_file}.partial.sql"
local new_db="${db_file}.partial"
log_info "Recovery Method 2: Partial table extraction for $db_name"
if [ "$DRY_RUN" = true ]; then
log_info "DRY RUN: Would attempt partial extraction method"
return 0
fi
log_info "Extracting schema and partial data..."
# Start the SQL file with schema
{
echo "-- Partial recovery of $db_name"
echo "-- Generated on $(date)"
echo ""
} > "$partial_sql"
# Extract schema
if sudo "$PLEX_SQLITE" "$db_file" ".schema" >> "$partial_sql" 2>/dev/null; then
log_success "Schema extracted successfully"
else
log_warning "Schema extraction failed, trying alternative method"
# Try with standard sqlite3
if sqlite3 "$db_file" ".schema" >> "$partial_sql" 2>/dev/null; then
log_success "Schema extracted with standard sqlite3"
else
log_error "Schema extraction failed completely"
rm -f "$partial_sql"
return 1
fi
fi
# Critical tables to extract (in order of importance)
local critical_tables=(
"accounts"
"library_sections"
"directories"
"metadata_items"
"media_items"
"media_parts"
"media_streams"
"taggings"
"tags"
)
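# (The order roughly follows the foreign-key chain: accounts and sections
# first, then the metadata and media rows that reference them, so a partial
# extraction keeps as many intact relationships as possible.)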
log_info "Attempting to extract critical tables..."
for table in "${critical_tables[@]}"; do
log_info "Extracting table: $table"
# Try to extract with LIMIT to avoid hitting corrupted data
local extract_success=false
local limit=10000
while [ $limit -le 100000 ] && [ "$extract_success" = false ]; do
if sudo "$PLEX_SQLITE" "$db_file" "SELECT COUNT(*) FROM $table;" >/dev/null 2>&1; then
# Table exists and is readable
{
echo ""
echo "-- Data for table $table (limited to $limit rows)"
echo "DELETE FROM $table;"
} >> "$partial_sql"
if sudo "$PLEX_SQLITE" "$db_file" ".mode insert $table" >>/dev/null 2>&1 && \
sudo "$PLEX_SQLITE" "$db_file" "SELECT * FROM $table LIMIT $limit;" >> "$partial_sql" 2>/dev/null; then
local row_count=$(tail -n +3 "$partial_sql" | grep "INSERT INTO $table" | wc -l)
log_success "Extracted $row_count rows from $table"
extract_success=true
else
log_warning "Failed to extract $table with limit $limit, trying smaller limit"
limit=$((limit / 2))
fi
else
log_warning "Table $table is not accessible or doesn't exist"
break
fi
done
if [ "$extract_success" = false ]; then
log_warning "Could not extract any data from table $table"
fi
done
# Create new database from partial data
if [ -f "$partial_sql" ] && [ -s "$partial_sql" ]; then
log_info "Creating database from partial extraction..."
if sqlite3 "$new_db" < "$partial_sql" 2>/dev/null; then
log_success "Partial database created successfully"
# Verify basic functionality
if sqlite3 "$new_db" "PRAGMA integrity_check;" | grep -q "ok"; then
log_success "Partial database integrity verified"
# Replace original with partial database
if sudo mv "$db_file" "${db_file}.corrupted" && sudo mv "$new_db" "$db_file"; then
sudo chown plex:plex "$db_file"
sudo chmod 644 "$db_file"
log_success "Database partially recovered - some data may be lost"
log_warning "Please verify your Plex library after recovery"
# Clean up
rm -f "$partial_sql"
return 0
else
log_error "Failed to replace original database"
fi
else
log_error "Partial database failed integrity check"
fi
else
log_error "Failed to create database from partial extraction"
fi
else
log_error "Partial extraction SQL file is empty"
fi
# Clean up on failure
rm -f "$partial_sql" "$new_db"
return 1
}
# Recovery Method 3: Emergency data extraction
recovery_method_emergency_extraction() {
local db_file="$1"
local db_name=$(basename "$db_file")
log_info "Recovery Method 3: Emergency data extraction for $db_name"
if [ "$DRY_RUN" = true ]; then
log_info "DRY RUN: Would attempt emergency extraction method"
return 0
fi
log_warning "This method will create a minimal database with basic library structure"
log_warning "You will likely need to re-scan your media libraries"
if [ "$AUTO_MODE" = false ]; then
read -p "Continue with emergency extraction? This will lose most metadata [y/N]: " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log_info "Emergency extraction cancelled by user"
return 1
fi
fi
local emergency_db="${db_file}.emergency"
# Create a minimal database with essential tables
log_info "Creating minimal emergency database..."
cat > "/tmp/emergency_schema.sql" << 'EOF'
-- Emergency Plex database schema (minimal)
CREATE TABLE accounts (
id INTEGER PRIMARY KEY,
name TEXT,
hashed_password TEXT,
salt TEXT,
created_at DATETIME,
updated_at DATETIME
);
CREATE TABLE library_sections (
id INTEGER PRIMARY KEY,
name TEXT,
section_type INTEGER,
agent TEXT,
scanner TEXT,
language TEXT,
created_at DATETIME,
updated_at DATETIME
);
CREATE TABLE directories (
id INTEGER PRIMARY KEY,
library_section_id INTEGER,
path TEXT,
created_at DATETIME,
updated_at DATETIME
);
-- Insert default admin account
INSERT INTO accounts (id, name, created_at, updated_at)
VALUES (1, 'plex', datetime('now'), datetime('now'));
EOF
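# Note: this schema is a deliberately tiny subset of the real Plex schema.
# Plex will treat the resulting database as a nearly empty server, which is
# why library sections must be re-added and media re-scanned afterwards.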
if sqlite3 "$emergency_db" < "/tmp/emergency_schema.sql" 2>/dev/null; then
log_success "Emergency database created"
# Replace original with emergency database
if sudo mv "$db_file" "${db_file}.corrupted" && sudo mv "$emergency_db" "$db_file"; then
sudo chown plex:plex "$db_file"
sudo chmod 644 "$db_file"
log_success "Emergency database installed"
log_warning "You will need to re-add library sections and re-scan media"
# Clean up
rm -f "/tmp/emergency_schema.sql"
return 0
else
log_error "Failed to install emergency database"
fi
else
log_error "Failed to create emergency database"
fi
# Clean up on failure
rm -f "/tmp/emergency_schema.sql" "$emergency_db"
return 1
}
# Recovery Method 4: Restore from backup
recovery_method_backup_restore() {
local db_file="$1"
local backup_dir="/mnt/share/media/backups/plex"
log_info "Recovery Method 4: Restore from most recent backup"
if [ "$DRY_RUN" = true ]; then
log_info "DRY RUN: Would attempt backup restoration"
return 0
fi
# Find most recent backup
local latest_backup=$(find "$backup_dir" -maxdepth 1 -name "plex-backup-*.tar.gz" -type f 2>/dev/null | sort -r | head -1)
if [ -z "$latest_backup" ]; then
log_error "No backup files found in $backup_dir"
return 1
fi
log_info "Found latest backup: $(basename "$latest_backup")"
if [ "$AUTO_MODE" = false ]; then
read -p "Restore from backup $(basename "$latest_backup")? [y/N]: " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log_info "Backup restoration cancelled by user"
return 1
fi
fi
# Extract and restore database from backup
local temp_extract="/tmp/plex-recovery-extract-$(date +%Y%m%d_%H%M%S)"
mkdir -p "$temp_extract"
log_info "Extracting backup..."
if tar -xzf "$latest_backup" -C "$temp_extract" 2>/dev/null; then
local backup_db_file="$temp_extract/$(basename "$db_file")"
if [ -f "$backup_db_file" ]; then
# Verify backup database integrity
if sqlite3 "$backup_db_file" "PRAGMA integrity_check;" | grep -q "ok"; then
log_success "Backup database integrity verified"
# Replace corrupted database with backup
if sudo mv "$db_file" "${db_file}.corrupted" && sudo cp "$backup_db_file" "$db_file"; then
sudo chown plex:plex "$db_file"
sudo chmod 644 "$db_file"
log_success "Database restored from backup"
# Clean up
rm -rf "$temp_extract"
return 0
else
log_error "Failed to replace database with backup"
fi
else
log_error "Backup database also has integrity issues"
fi
else
log_error "Database file not found in backup"
fi
else
log_error "Failed to extract backup"
fi
# Clean up on failure
rm -rf "$temp_extract"
return 1
}
# Main recovery function
main_recovery() {
local db_file="$PLEX_DB_DIR/$MAIN_DB"
log_info "Starting Plex database recovery process"
log_info "Recovery log: $RECOVERY_LOG"
# Check dependencies
if ! check_dependencies; then
exit 1
fi
# Stop Plex service
if ! stop_plex_service; then
exit 1
fi
# Change to database directory
cd "$PLEX_DB_DIR" || {
log_error "Failed to change to database directory"
exit 1
}
# Check if database exists
if [ ! -f "$MAIN_DB" ]; then
log_error "Main database file not found: $MAIN_DB"
exit 1
fi
# Create backup of current corrupted state
log_info "Creating backup of current corrupted database..."
if [ "$DRY_RUN" = false ]; then
sudo cp "$MAIN_DB" "${MAIN_DB}.${BACKUP_SUFFIX}"
log_success "Corrupted database backed up as: ${MAIN_DB}.${BACKUP_SUFFIX}"
fi
# Check current integrity
log_info "Verifying database corruption..."
if check_database_integrity "$MAIN_DB"; then
log_success "Database integrity check passed - no recovery needed!"
start_plex_service
exit 0
fi
log_warning "Database corruption confirmed, attempting recovery..."
# Try recovery methods in order
local recovery_methods=(
"recovery_method_sqlite_recover"
"recovery_method_partial_extraction"
"recovery_method_emergency_extraction"
"recovery_method_backup_restore"
)
for method in "${recovery_methods[@]}"; do
log_info "Attempting: $method"
if $method "$MAIN_DB"; then
log_success "Recovery successful using: $method"
# Verify the recovered database
if check_database_integrity "$MAIN_DB"; then
log_success "Recovered database integrity verified"
start_plex_service
log_success "Database recovery completed successfully!"
log_info "Please check your Plex server and verify your libraries"
exit 0
else
log_error "Recovered database still has integrity issues"
# Restore backup for next attempt
if [ "$DRY_RUN" = false ]; then
sudo cp "${MAIN_DB}.${BACKUP_SUFFIX}" "$MAIN_DB"
fi
fi
else
log_warning "Recovery method failed: $method"
fi
done
log_error "All recovery methods failed"
log_error "Manual intervention required"
# Restore original corrupted database
if [ "$DRY_RUN" = false ]; then
sudo cp "${MAIN_DB}.${BACKUP_SUFFIX}" "$MAIN_DB"
fi
start_plex_service
exit 1
}
# Trap to ensure Plex service is restarted
trap 'start_plex_service' EXIT
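# The explicit start_plex_service calls above make this trap redundant on the
# success paths, but systemctl start is idempotent, so the duplicate start on
# normal exit is harmless.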
# Run main recovery
main_recovery "$@"

View File

@@ -87,12 +87,17 @@ log_warning() {
# List available backups
list_backups() {
log_message "Available backups:"
find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort -r | while read backup_file; do
local backup_name=$(basename "$backup_file")
local backup_date=$(echo "$backup_name" | sed 's/plex-backup-\([0-9]\{8\}\)_[0-9]\{6\}\.tar\.gz/\1/')
find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort -r | while read -r backup_file; do
local backup_name
backup_name=$(basename "$backup_file")
local backup_date
backup_date=${backup_name#plex-backup-}
backup_date=${backup_date%_*.tar.gz}
if [[ "$backup_date" =~ ^[0-9]{8}$ ]]; then
local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Unknown date")
local file_size=$(du -h "$backup_file" 2>/dev/null | cut -f1)
local readable_date
readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Unknown date")
local file_size
file_size=$(du -h "$backup_file" 2>/dev/null | cut -f1)
echo " $backup_name ($readable_date) - $file_size"
else
echo " $backup_name - $(du -h "$backup_file" 2>/dev/null | cut -f1)"
@@ -117,7 +122,7 @@ validate_backup() {
# List contents to verify expected files are present
log_message "Archive contents:"
tar -tzf "$backup_file" | while read file; do
tar -tzf "$backup_file" | while read -r file; do
log_success " Found: $file"
done
return 0
@@ -129,7 +134,8 @@ validate_backup() {
# Create backup of current Plex data
backup_current_data() {
local backup_suffix=$(date '+%Y%m%d_%H%M%S')
local backup_suffix
backup_suffix=$(date '+%Y%m%d_%H%M%S')
local current_backup_dir="$SCRIPT_DIR/plex_current_backup_$backup_suffix"
log_message "Creating backup of current Plex data..."
@@ -162,7 +168,8 @@ restore_files() {
fi
# Create temporary extraction directory
local temp_dir="/tmp/plex-restore-$(date +%Y%m%d_%H%M%S)"
local temp_dir
temp_dir="/tmp/plex-restore-$(date +%Y%m%d_%H%M%S)"
mkdir -p "$temp_dir"
log_message "Extracting backup archive..."
@@ -277,8 +284,8 @@ main() {
manage_plex_service stop
# Backup current data
local current_backup=$(backup_current_data)
if [ $? -ne 0 ]; then
local current_backup
if ! current_backup=$(backup_current_data); then
log_error "Failed to backup current data"
manage_plex_service start
exit 1

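Aside: the hunks above replace sed pipelines with bash parameter expansion when parsing dates out of backup filenames. A standalone sketch of the same extraction, using a hypothetical filename:

backup_name="plex-backup-20250125_143022.tar.gz"  # hypothetical example
backup_date=${backup_name#plex-backup-}           # -> 20250125_143022.tar.gz
backup_date=${backup_date%_*.tar.gz}              # -> 20250125
echo "$backup_date"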
View File

@@ -0,0 +1,306 @@
#!/bin/bash
################################################################################
# Plex Media Server Backup Restoration Script
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Safe and reliable restoration script for Plex Media Server
# backups with validation, dry-run capability, and automatic
# backup of current data before restoration.
#
# Features:
# - Interactive backup selection from available archives
# - Backup validation before restoration
# - Dry-run mode for testing restoration process
# - Automatic backup of current data before restoration
# - Service management (stop/start Plex during restoration)
# - Comprehensive logging and error handling
# - File ownership and permission restoration
#
# Related Scripts:
# - backup-plex.sh: Creates backups that this script restores
# - validate-plex-backups.sh: Validates backup integrity
# - monitor-plex-backup.sh: Monitors backup system health
# - test-plex-backup.sh: Tests backup/restore operations
# - plex.sh: General Plex service management
#
# Usage:
# ./restore-plex.sh # List available backups
# ./restore-plex.sh plex-backup-20250125_143022.tar.gz # Restore specific backup
# ./restore-plex.sh --dry-run backup-file.tar.gz # Test restoration process
# ./restore-plex.sh --list # List all available backups
#
# Dependencies:
# - tar (for archive extraction)
# - Plex Media Server
# - systemctl (for service management)
# - Access to backup directory
#
# Exit Codes:
# 0 - Success
# 1 - General error
# 2 - Backup file not found or invalid
# 3 - Service management failure
# 4 - Restoration failure
#
################################################################################
# Plex Backup Restoration Script
# Usage: ./restore-plex.sh [backup_date] [--dry-run]
set -e
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
BACKUP_ROOT="/mnt/share/media/backups/plex"
PLEX_DATA_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server"
# Plex file locations
declare -A RESTORE_LOCATIONS=(
["com.plexapp.plugins.library.db"]="$PLEX_DATA_DIR/Plug-in Support/Databases/"
["com.plexapp.plugins.library.blobs.db"]="$PLEX_DATA_DIR/Plug-in Support/Databases/"
["Preferences.xml"]="$PLEX_DATA_DIR/"
)
log_message() {
echo -e "$(date '+%H:%M:%S') $1"
}
log_error() {
log_message "${RED}ERROR: $1${NC}"
}
log_success() {
log_message "${GREEN}SUCCESS: $1${NC}"
}
log_warning() {
log_message "${YELLOW}WARNING: $1${NC}"
}
# List available backups
list_backups() {
log_message "Available backups:"
find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort -r | while read backup_file; do
local backup_name=$(basename "$backup_file")
local backup_date=$(echo "$backup_name" | sed 's/plex-backup-\([0-9]\{8\}\)_[0-9]\{6\}\.tar\.gz/\1/')
if [[ "$backup_date" =~ ^[0-9]{8}$ ]]; then
local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Unknown date")
local file_size=$(du -h "$backup_file" 2>/dev/null | cut -f1)
echo " $backup_name ($readable_date) - $file_size"
else
echo " $backup_name - $(du -h "$backup_file" 2>/dev/null | cut -f1)"
fi
done
}
# Validate backup integrity
validate_backup() {
local backup_file="$1"
if [ ! -f "$backup_file" ]; then
log_error "Backup file not found: $backup_file"
return 1
fi
log_message "Validating backup integrity for $(basename "$backup_file")..."
# Test archive integrity
if tar -tzf "$backup_file" >/dev/null 2>&1; then
log_success "Archive integrity check passed"
# List contents to verify expected files are present
log_message "Archive contents:"
tar -tzf "$backup_file" | while read file; do
log_success " Found: $file"
done
return 0
else
log_error "Archive integrity check failed"
return 1
fi
}
# Create backup of current Plex data
backup_current_data() {
local backup_suffix=$(date '+%Y%m%d_%H%M%S')
local current_backup_dir="$SCRIPT_DIR/plex_current_backup_$backup_suffix"
log_message "Creating backup of current Plex data..."
mkdir -p "$current_backup_dir"
for file in "${!RESTORE_LOCATIONS[@]}"; do
local src="${RESTORE_LOCATIONS[$file]}$file"
if [ -f "$src" ]; then
if sudo cp "$src" "$current_backup_dir/"; then
log_success "Backed up current: $file"
else
log_error "Failed to backup current: $file"
return 1
fi
fi
done
log_success "Current data backed up to: $current_backup_dir"
echo "$current_backup_dir"
}
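# Note: backup_current_data "returns" the backup directory path on stdout,
# and callers capture it with command substitution, so the log_message output
# above is captured along with the path. Bear that in mind before consuming
# $current_backup programmatically.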
# Restore files from backup
restore_files() {
local backup_file="$1"
local dry_run="$2"
if [ ! -f "$backup_file" ]; then
log_error "Backup file not found: $backup_file"
return 1
fi
# Create temporary extraction directory
local temp_dir="/tmp/plex-restore-$(date +%Y%m%d_%H%M%S)"
mkdir -p "$temp_dir"
log_message "Extracting backup archive..."
if ! tar -xzf "$backup_file" -C "$temp_dir"; then
log_error "Failed to extract backup archive"
rm -rf "$temp_dir"
return 1
fi
log_message "Restoring files..."
local restore_errors=0
for file in "${!RESTORE_LOCATIONS[@]}"; do
local src_file="$temp_dir/$file"
local dest_path="${RESTORE_LOCATIONS[$file]}"
local dest_file="$dest_path$file"
if [ -f "$src_file" ]; then
if [ "$dry_run" == "true" ]; then
log_message "Would restore: $file to $dest_file"
else
log_message "Restoring: $file"
if sudo cp "$src_file" "$dest_file"; then
sudo chown plex:plex "$dest_file"
log_success "Restored: $file"
else
log_error "Failed to restore: $file"
restore_errors=$((restore_errors + 1))
fi
fi
else
log_warning "File not found in backup: $file"
restore_errors=$((restore_errors + 1))
fi
done
# Clean up temporary directory
rm -rf "$temp_dir"
return $restore_errors
}
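# restore_files uses its exit status to report the number of files that
# failed, so callers can treat any nonzero return as a partial failure.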
# Manage Plex service
manage_plex_service() {
local action="$1"
log_message "$action Plex Media Server..."
case "$action" in
"stop")
sudo systemctl stop plexmediaserver.service
sleep 3
log_success "Plex stopped"
;;
"start")
sudo systemctl start plexmediaserver.service
sleep 3
log_success "Plex started"
;;
esac
}
# Main function
main() {
local backup_file="$1"
local dry_run=false
# Check for dry-run flag
if [ "$2" = "--dry-run" ] || [ "$1" = "--dry-run" ]; then
dry_run=true
fi
# If no backup file provided, list available backups
if [ -z "$backup_file" ] || [ "$backup_file" = "--dry-run" ]; then
list_backups
echo
echo "Usage: $0 <backup_file> [--dry-run]"
echo "Example: $0 plex-backup-20250125_143022.tar.gz"
echo " $0 /mnt/share/media/backups/plex/plex-backup-20250125_143022.tar.gz"
exit 0
fi
# If relative path, prepend BACKUP_ROOT
if [[ "$backup_file" != /* ]]; then
backup_file="$BACKUP_ROOT/$backup_file"
fi
# Validate backup exists and is complete
if ! validate_backup "$backup_file"; then
log_error "Backup validation failed"
exit 1
fi
if [ "$dry_run" = "true" ]; then
restore_files "$backup_file" true
log_message "Dry run completed. No changes were made."
exit 0
fi
# Confirm restoration
echo
log_warning "This will restore Plex data from backup $(basename "$backup_file")"
log_warning "Current Plex data will be backed up before restoration"
read -p "Continue? (y/N): " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log_message "Restoration cancelled"
exit 0
fi
# Stop Plex service
manage_plex_service stop
# Backup current data
local current_backup
if ! current_backup=$(backup_current_data); then
log_error "Failed to backup current data"
manage_plex_service start
exit 1
fi
# Restore files
if restore_files "$backup_file" false; then
log_success "Restoration completed successfully"
log_message "Current data backup saved at: $current_backup"
else
log_error "Restoration failed"
manage_plex_service start
exit 1
fi
# Start Plex service
manage_plex_service start
log_success "Plex restoration completed. Please verify your server is working correctly."
}
# Trap to ensure Plex is restarted on script exit
trap 'manage_plex_service start' EXIT
main "$@"

View File

@@ -62,7 +62,6 @@ CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Test configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
TEST_DIR="/tmp/plex-backup-test-$(date +%s)"
TEST_BACKUP_ROOT="$TEST_DIR/backups"
TEST_LOG_ROOT="$TEST_DIR/logs"
@@ -76,30 +75,35 @@ declare -a FAILED_TESTS=()
# Logging functions
log_test() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${CYAN}[TEST ${timestamp}]${NC} $1"
}
log_pass() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
TESTS_PASSED=$((TESTS_PASSED + 1))
}
log_fail() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
TESTS_FAILED=$((TESTS_FAILED + 1))
FAILED_TESTS+=("$1")
}
log_info() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
}
log_warn() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
}
@@ -124,14 +128,16 @@ record_test_result() {
local test_name="$1"
local status="$2"
local error_message="$3"
local timestamp=$(date -Iseconds)
local timestamp
timestamp=$(date -Iseconds)
# Initialize results file if it doesn't exist
if [ ! -f "$TEST_RESULTS_FILE" ]; then
echo "[]" > "$TEST_RESULTS_FILE"
fi
local result=$(jq -n \
local result
result=$(jq -n \
--arg test_name "$test_name" \
--arg status "$status" \
--arg error_message "$error_message" \
@@ -186,7 +192,7 @@ mock_manage_plex_service() {
mock_calculate_checksum() {
local file="$1"
echo "$(echo "$file" | md5sum | cut -d' ' -f1)"
echo "$file" | md5sum | cut -d' ' -f1
return 0
}
@@ -226,10 +232,12 @@ test_performance_tracking() {
track_performance_test() {
local operation="$1"
local start_time="$2"
local end_time=$(date +%s)
local end_time
end_time=$(date +%s)
local duration=$((end_time - start_time))
local entry=$(jq -n \
local entry
entry=$(jq -n \
--arg operation "$operation" \
--arg duration "$duration" \
--arg timestamp "$(date -Iseconds)" \
@@ -244,12 +252,14 @@ test_performance_tracking() {
}
# Test tracking
local start_time=$(date +%s)
local start_time
start_time=$(date +%s)
sleep 1 # Simulate work
track_performance_test "test_operation" "$start_time"
# Verify entry was added
local entry_count=$(jq length "$test_perf_log")
local entry_count
entry_count=$(jq length "$test_perf_log")
if [ "$entry_count" -eq 1 ]; then
return 0
else
@@ -297,11 +307,13 @@ test_checksum_caching() {
calculate_checksum_test() {
local file="$1"
local cache_file="${file}.md5"
local file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
local file_mtime
file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
# Check cache
if [ -f "$cache_file" ]; then
local cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
local cache_mtime
cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
if [ "$cache_mtime" -gt "$file_mtime" ]; then
cat "$cache_file"
return 0
@@ -309,16 +321,19 @@ test_checksum_caching() {
fi
# Calculate and cache
local checksum=$(md5sum "$file" | cut -d' ' -f1)
local checksum
checksum=$(md5sum "$file" | cut -d' ' -f1)
echo "$checksum" > "$cache_file"
echo "$checksum"
}
# First calculation (should create cache)
local checksum1=$(calculate_checksum_test "$test_file")
local checksum1
checksum1=$(calculate_checksum_test "$test_file")
# Second calculation (should use cache)
local checksum2=$(calculate_checksum_test "$test_file")
local checksum2
checksum2=$(calculate_checksum_test "$test_file")
# Verify checksums match and cache file exists
if [ "$checksum1" = "$checksum2" ] && [ -f "$cache_file" ]; then
@@ -342,8 +357,10 @@ test_backup_verification() {
local src="$1"
local dest="$2"
local src_checksum=$(md5sum "$src" | cut -d' ' -f1)
local dest_checksum=$(md5sum "$dest" | cut -d' ' -f1)
local src_checksum
src_checksum=$(md5sum "$src" | cut -d' ' -f1)
local dest_checksum
dest_checksum=$(md5sum "$dest" | cut -d' ' -f1)
if [ "$src_checksum" = "$dest_checksum" ]; then
return 0
@@ -362,16 +379,17 @@ test_backup_verification() {
# Test: Parallel processing framework
test_parallel_processing() {
local temp_dir=$(mktemp -d)
local temp_dir
temp_dir=$(mktemp -d)
local -a pids=()
local total_jobs=5
local completed_jobs=0
# Simulate parallel jobs
for i in $(seq 1 $total_jobs); do
for i in $(seq 1 "$total_jobs"); do
(
# Simulate work
sleep 0.$i
sleep 0."$i"
echo "$i" > "$temp_dir/job_$i.result"
) &
pids+=($!)
@@ -385,7 +403,8 @@ test_parallel_processing() {
done
# Verify all jobs completed
local result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
local result_files
result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
# Cleanup
rm -rf "$temp_dir"
@@ -410,7 +429,8 @@ test_database_integrity() {
local db_file="$1"
# Use sqlite3 instead of Plex SQLite for testing
local result=$(sqlite3 "$db_file" "PRAGMA integrity_check;" 2>/dev/null)
local result
result=$(sqlite3 "$db_file" "PRAGMA integrity_check;" 2>/dev/null)
if echo "$result" | grep -q "ok"; then
return 0
@@ -449,7 +469,8 @@ test_configuration_parsing() {
}
# Test parsing
local result=$(parse_args_test --auto-repair --webhook=http://example.com)
local result
result=$(parse_args_test --auto-repair --webhook=http://example.com)
if echo "$result" | grep -q "true true http://example.com"; then
return 0
@@ -523,19 +544,22 @@ run_integration_tests() {
run_performance_tests() {
log_info "Starting performance benchmarks"
local start_time=$(date +%s)
local start_time
start_time=$(date +%s)
# Test file operations
local test_file="$TEST_DIR/perf_test.dat"
dd if=/dev/zero of="$test_file" bs=1M count=10 2>/dev/null
# Benchmark checksum calculation
local checksum_start=$(date +%s)
local checksum_start
checksum_start=$(date +%s)
md5sum "$test_file" > /dev/null
local checksum_time=$(($(date +%s) - checksum_start))
# Benchmark compression
local compress_start=$(date +%s)
local compress_start
compress_start=$(date +%s)
tar -czf "$TEST_DIR/perf_test.tar.gz" -C "$TEST_DIR" "perf_test.dat"
local compress_time=$(($(date +%s) - compress_start))
@@ -547,7 +571,8 @@ run_performance_tests() {
log_info " Total benchmark time: ${total_time}s"
# Record performance data
local perf_entry=$(jq -n \
local perf_entry
perf_entry=$(jq -n \
--arg checksum_time "$checksum_time" \
--arg compress_time "$compress_time" \
--arg total_time "$total_time" \
@@ -565,7 +590,8 @@ run_performance_tests() {
# Generate comprehensive test report
generate_test_report() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo
echo "=============================================="
@@ -601,7 +627,8 @@ generate_test_report() {
# Save detailed results
if [ -f "$TEST_RESULTS_FILE" ]; then
local report_file="$TEST_DIR/test_report_$(date +%Y%m%d_%H%M%S).json"
local report_file
report_file="$TEST_DIR/test_report_$(date +%Y%m%d_%H%M%S).json"
jq -n \
--arg timestamp "$timestamp" \
--arg tests_run "$TESTS_RUN" \
@@ -645,22 +672,27 @@ run_integration_tests() {
run_performance_tests() {
log_info "Running performance benchmarks..."
local start_time=$(date +%s)
local start_time
start_time=$(date +%s)
# Create large test files
local large_file="$TEST_DIR/large_test.db"
dd if=/dev/zero of="$large_file" bs=1M count=100 2>/dev/null
# Benchmark checksum calculation
local checksum_start=$(date +%s)
local checksum_start
checksum_start=$(date +%s)
md5sum "$large_file" > /dev/null
local checksum_end=$(date +%s)
local checksum_end
checksum_end=$(date +%s)
local checksum_time=$((checksum_end - checksum_start))
# Benchmark compression
local compress_start=$(date +%s)
local compress_start
compress_start=$(date +%s)
tar -czf "$TEST_DIR/large_test.tar.gz" -C "$TEST_DIR" "large_test.db"
local compress_end=$(date +%s)
local compress_end
compress_end=$(date +%s)
local compress_time=$((compress_end - compress_start))
local total_time=$(($(date +%s) - start_time))

View File

@@ -0,0 +1,715 @@
#!/bin/bash
################################################################################
# Plex Backup System Comprehensive Test Suite
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Automated testing framework for the complete Plex backup
# ecosystem, providing unit tests, integration tests, and
# end-to-end validation of all backup operations.
#
# Features:
# - Unit testing for individual backup components
# - Integration testing for full backup workflows
# - Database integrity test scenarios
# - Service management testing
# - Performance benchmarking
# - Error condition simulation and recovery testing
# - Test result reporting and analysis
#
# Related Scripts:
# - backup-plex.sh: Primary script under test
# - restore-plex.sh: Restoration testing component
# - validate-plex-backups.sh: Validation testing
# - monitor-plex-backup.sh: Monitoring system testing
# - plex.sh: Service management testing
#
# Usage:
# ./test-plex-backup.sh # Run full test suite
# ./test-plex-backup.sh --unit # Unit tests only
# ./test-plex-backup.sh --integration # Integration tests only
# ./test-plex-backup.sh --quick # Quick smoke tests
# ./test-plex-backup.sh --cleanup # Clean up test artifacts
#
# Dependencies:
# - All Plex backup scripts in this directory
# - sqlite3 or Plex SQLite binary
# - jq (for JSON processing)
# - tar (for archive operations)
# - systemctl (for service testing)
#
# Exit Codes:
# 0 - All tests passed
# 1 - General error
# 2 - Test failures detected
# 3 - Missing dependencies
# 4 - Test setup failure
#
################################################################################
# Comprehensive Plex Backup System Test Suite
# This script provides automated testing for all backup-related functionality
set -e
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Test configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
TEST_DIR="/tmp/plex-backup-test-$(date +%s)"
TEST_BACKUP_ROOT="$TEST_DIR/backups"
TEST_LOG_ROOT="$TEST_DIR/logs"
TEST_RESULTS_FILE="$TEST_DIR/test-results.json"
# Test counters
TESTS_RUN=0
TESTS_PASSED=0
TESTS_FAILED=0
declare -a FAILED_TESTS=()
# Logging functions
log_test() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${CYAN}[TEST ${timestamp}]${NC} $1"
}
log_pass() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
TESTS_PASSED=$((TESTS_PASSED + 1))
}
log_fail() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
TESTS_FAILED=$((TESTS_FAILED + 1))
FAILED_TESTS+=("$1")
}
log_info() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
}
log_warn() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
}
# Test framework functions
run_test() {
local test_name="$1"
local test_function="$2"
TESTS_RUN=$((TESTS_RUN + 1))
log_test "Running: $test_name"
if $test_function; then
log_pass "$test_name"
record_test_result "$test_name" "PASS" ""
else
log_fail "$test_name"
record_test_result "$test_name" "FAIL" "Test function returned non-zero exit code"
fi
}
record_test_result() {
local test_name="$1"
local status="$2"
local error_message="$3"
local timestamp=$(date -Iseconds)
# Initialize results file if it doesn't exist
if [ ! -f "$TEST_RESULTS_FILE" ]; then
echo "[]" > "$TEST_RESULTS_FILE"
fi
local result=$(jq -n \
--arg test_name "$test_name" \
--arg status "$status" \
--arg error_message "$error_message" \
--arg timestamp "$timestamp" \
'{
test_name: $test_name,
status: $status,
error_message: $error_message,
timestamp: $timestamp
}')
jq --argjson result "$result" '. += [$result]' "$TEST_RESULTS_FILE" > "${TEST_RESULTS_FILE}.tmp" && \
mv "${TEST_RESULTS_FILE}.tmp" "$TEST_RESULTS_FILE"
}
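# The write-to-temp-then-mv pattern above is the usual way to "edit" a JSON
# file with jq: jq cannot rewrite a file in place, and the final mv makes the
# update effectively atomic on the same filesystem.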
# Setup test environment
setup_test_environment() {
log_info "Setting up test environment in $TEST_DIR"
# Create test directories
mkdir -p "$TEST_DIR"
mkdir -p "$TEST_BACKUP_ROOT"
mkdir -p "$TEST_LOG_ROOT"
mkdir -p "$TEST_DIR/mock_plex"
# Create mock Plex files for testing
echo "PRAGMA user_version=1;" > "$TEST_DIR/mock_plex/com.plexapp.plugins.library.db"
echo "PRAGMA user_version=1;" > "$TEST_DIR/mock_plex/com.plexapp.plugins.library.blobs.db"
dd if=/dev/zero of="$TEST_DIR/mock_plex/Preferences.xml" bs=1024 count=1 2>/dev/null
# Create mock performance log
echo "[]" > "$TEST_DIR/mock-performance.json"
echo "{}" > "$TEST_DIR/mock-backup.json"
log_info "Test environment setup complete"
}
# Cleanup test environment
cleanup_test_environment() {
if [ -d "$TEST_DIR" ]; then
log_info "Cleaning up test environment"
rm -rf "$TEST_DIR"
fi
}
# Mock functions to replace actual backup script functions
mock_manage_plex_service() {
local action="$1"
echo "Mock: Plex service $action"
return 0
}
mock_calculate_checksum() {
local file="$1"
echo "$(echo "$file" | md5sum | cut -d' ' -f1)"
return 0
}
mock_verify_backup() {
local src="$1"
local dest="$2"
# Always return success for testing
return 0
}
# Test: JSON log initialization
test_json_log_initialization() {
local test_log="$TEST_DIR/test-init.json"
# Remove file if it exists
rm -f "$test_log"
# Test initialization
if [ ! -f "$test_log" ] || ! jq empty "$test_log" 2>/dev/null; then
echo "{}" > "$test_log"
fi
# Verify file exists and is valid JSON
if [ -f "$test_log" ] && jq empty "$test_log" 2>/dev/null; then
return 0
else
return 1
fi
}
# Test: Performance tracking
test_performance_tracking() {
local test_perf_log="$TEST_DIR/test-performance.json"
echo "[]" > "$test_perf_log"
# Mock performance tracking function
track_performance_test() {
local operation="$1"
local start_time="$2"
local end_time=$(date +%s)
local duration=$((end_time - start_time))
local entry=$(jq -n \
--arg operation "$operation" \
--arg duration "$duration" \
--arg timestamp "$(date -Iseconds)" \
'{
operation: $operation,
duration_seconds: ($duration | tonumber),
timestamp: $timestamp
}')
jq --argjson entry "$entry" '. += [$entry]' "$test_perf_log" > "${test_perf_log}.tmp" && \
mv "${test_perf_log}.tmp" "$test_perf_log"
}
# Test tracking
local start_time=$(date +%s)
sleep 1 # Simulate work
track_performance_test "test_operation" "$start_time"
# Verify entry was added
local entry_count=$(jq length "$test_perf_log")
if [ "$entry_count" -eq 1 ]; then
return 0
else
return 1
fi
}
# Test: Notification system
test_notification_system() {
# Mock notification function
send_notification_test() {
local title="$1"
local message="$2"
local status="${3:-info}"
# Just verify parameters are received correctly
if [ -n "$title" ] && [ -n "$message" ]; then
echo "Notification: $title - $message ($status)" > "$TEST_DIR/notification.log"
return 0
else
return 1
fi
}
# Test notification
send_notification_test "Test Title" "Test Message" "success"
# Verify notification was processed
if [ -f "$TEST_DIR/notification.log" ] && grep -q "Test Title" "$TEST_DIR/notification.log"; then
return 0
else
return 1
fi
}
# Test: Checksum caching
test_checksum_caching() {
local test_file="$TEST_DIR/checksum_test.txt"
local cache_file="${test_file}.md5"
# Create test file
echo "test content" > "$test_file"
# Mock checksum function with caching
calculate_checksum_test() {
local file="$1"
local cache_file="${file}.md5"
local file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
# Check cache
if [ -f "$cache_file" ]; then
local cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
if [ "$cache_mtime" -gt "$file_mtime" ]; then
cat "$cache_file"
return 0
fi
fi
# Calculate and cache
local checksum=$(md5sum "$file" | cut -d' ' -f1)
echo "$checksum" > "$cache_file"
echo "$checksum"
}
# First calculation (should create cache)
local checksum1=$(calculate_checksum_test "$test_file")
# Second calculation (should use cache)
local checksum2=$(calculate_checksum_test "$test_file")
# Verify checksums match and cache file exists
if [ "$checksum1" = "$checksum2" ] && [ -f "$cache_file" ]; then
return 0
else
return 1
fi
}
# Test: Backup verification
test_backup_verification() {
local src_file="$TEST_DIR/source.txt"
local dest_file="$TEST_DIR/backup.txt"
# Create identical files
echo "backup test content" > "$src_file"
cp "$src_file" "$dest_file"
# Mock verification function
verify_backup_test() {
local src="$1"
local dest="$2"
local src_checksum=$(md5sum "$src" | cut -d' ' -f1)
local dest_checksum=$(md5sum "$dest" | cut -d' ' -f1)
if [ "$src_checksum" = "$dest_checksum" ]; then
return 0
else
return 1
fi
}
# Test verification
if verify_backup_test "$src_file" "$dest_file"; then
return 0
else
return 1
fi
}
# Test: Parallel processing framework
test_parallel_processing() {
local temp_dir=$(mktemp -d)
local -a pids=()
local total_jobs=5
local completed_jobs=0
# Simulate parallel jobs
for i in $(seq 1 $total_jobs); do
(
# Simulate work
sleep 0.$i
echo "$i" > "$temp_dir/job_$i.result"
) &
pids+=($!)
done
# Wait for all jobs
for pid in "${pids[@]}"; do
if wait "$pid"; then
completed_jobs=$((completed_jobs + 1))
fi
done
# Verify all jobs completed
local result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
# Cleanup
rm -rf "$temp_dir"
if [ "$completed_jobs" -eq "$total_jobs" ] && [ "$result_files" -eq "$total_jobs" ]; then
return 0
else
return 1
fi
}
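# Pattern note: collecting PIDs and waiting on each one individually (rather
# than a bare `wait`) lets the harness count per-job exit statuses instead of
# only learning that everything finished.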
# Test: Database integrity check simulation
test_database_integrity() {
local test_db="$TEST_DIR/test.db"
# Create a simple SQLite database
sqlite3 "$test_db" "CREATE TABLE test (id INTEGER, name TEXT);"
sqlite3 "$test_db" "INSERT INTO test VALUES (1, 'test');"
# Mock integrity check
check_integrity_test() {
local db_file="$1"
# Use sqlite3 instead of Plex SQLite for testing
local result=$(sqlite3 "$db_file" "PRAGMA integrity_check;" 2>/dev/null)
if echo "$result" | grep -q "ok"; then
return 0
else
return 1
fi
}
# Test integrity check
if check_integrity_test "$test_db"; then
return 0
else
return 1
fi
}
# Test: Configuration parsing
test_configuration_parsing() {
# Mock command line parsing
parse_args_test() {
local args=("$@")
local auto_repair=false
local parallel=true
local webhook=""
for arg in "${args[@]}"; do
case "$arg" in
--auto-repair) auto_repair=true ;;
--no-parallel) parallel=false ;;
--webhook=*) webhook="${arg#*=}" ;;
esac
done
# Return parsed values
echo "$auto_repair $parallel $webhook"
}
# Test parsing
local result=$(parse_args_test --auto-repair --webhook=http://example.com)
if echo "$result" | grep -q "true true http://example.com"; then
return 0
else
return 1
fi
}
# Test: Error handling
test_error_handling() {
# Mock function that can fail
test_function_with_error() {
local should_fail="$1"
if [ "$should_fail" = "true" ]; then
return 1
else
return 0
fi
}
# Test success case
if test_function_with_error "false"; then
# Test failure case
if ! test_function_with_error "true"; then
return 0 # Both cases worked as expected
fi
fi
return 1
}
# Run all unit tests
run_all_tests() {
log_info "Setting up test environment"
setup_test_environment
log_info "Starting unit tests"
# Core functionality tests
run_test "JSON Log Initialization" test_json_log_initialization
run_test "Performance Tracking" test_performance_tracking
run_test "Notification System" test_notification_system
run_test "Checksum Caching" test_checksum_caching
run_test "Backup Verification" test_backup_verification
run_test "Parallel Processing" test_parallel_processing
run_test "Database Integrity Check" test_database_integrity
run_test "Configuration Parsing" test_configuration_parsing
run_test "Error Handling" test_error_handling
log_info "Unit tests completed"
}
# Run integration tests (requires actual Plex environment)
run_integration_tests() {
log_info "Starting integration tests"
log_warn "Integration tests require a working Plex installation"
# Check if Plex service exists
if ! systemctl list-units --all | grep -q plexmediaserver; then
log_warn "Plex service not found - skipping integration tests"
return 0
fi
# Test actual service management (if safe to do so)
log_info "Integration tests would test actual Plex service management"
log_info "Skipping for safety - implement with caution"
}
# Run performance tests
run_performance_tests() {
log_info "Starting performance benchmarks"
local start_time=$(date +%s)
# Test file operations
local test_file="$TEST_DIR/perf_test.dat"
dd if=/dev/zero of="$test_file" bs=1M count=10 2>/dev/null
# Benchmark checksum calculation
local checksum_start=$(date +%s)
md5sum "$test_file" > /dev/null
local checksum_time=$(($(date +%s) - checksum_start))
# Benchmark compression
local compress_start=$(date +%s)
tar -czf "$TEST_DIR/perf_test.tar.gz" -C "$TEST_DIR" "perf_test.dat"
local compress_time=$(($(date +%s) - compress_start))
local total_time=$(($(date +%s) - start_time))
log_info "Performance Results:"
log_info " Checksum (10MB): ${checksum_time}s"
log_info " Compression (10MB): ${compress_time}s"
log_info " Total benchmark time: ${total_time}s"
# Record performance data
local perf_entry=$(jq -n \
--arg checksum_time "$checksum_time" \
--arg compress_time "$compress_time" \
--arg total_time "$total_time" \
--arg timestamp "$(date -Iseconds)" \
'{
benchmark: "performance_test",
checksum_time_seconds: ($checksum_time | tonumber),
compress_time_seconds: ($compress_time | tonumber),
total_time_seconds: ($total_time | tonumber),
timestamp: $timestamp
}')
echo "$perf_entry" > "$TEST_DIR/performance_results.json"
}
# Generate comprehensive test report
generate_test_report() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo
echo "=============================================="
echo " PLEX BACKUP TEST REPORT"
echo "=============================================="
echo "Test Run: $timestamp"
echo "Tests Run: $TESTS_RUN"
echo "Tests Passed: $TESTS_PASSED"
echo "Tests Failed: $TESTS_FAILED"
echo
if [ $TESTS_FAILED -gt 0 ]; then
echo "FAILED TESTS:"
for failed_test in "${FAILED_TESTS[@]}"; do
echo " - $failed_test"
done
echo
fi
local success_rate=0
if [ $TESTS_RUN -gt 0 ]; then
success_rate=$(( (TESTS_PASSED * 100) / TESTS_RUN ))
fi
echo "Success Rate: ${success_rate}%"
echo
if [ $TESTS_FAILED -eq 0 ]; then
log_pass "All tests passed successfully!"
else
log_fail "Some tests failed - review output above"
fi
# Save detailed results
if [ -f "$TEST_RESULTS_FILE" ]; then
local report_file="$TEST_DIR/test_report_$(date +%Y%m%d_%H%M%S).json"
jq -n \
--arg timestamp "$timestamp" \
--arg tests_run "$TESTS_RUN" \
--arg tests_passed "$TESTS_PASSED" \
--arg tests_failed "$TESTS_FAILED" \
--arg success_rate "$success_rate" \
--argjson failed_tests "$(printf '%s\n' "${FAILED_TESTS[@]}" | jq -R . | jq -s .)" \
--argjson test_details "$(cat "$TEST_RESULTS_FILE")" \
'{
test_run_timestamp: $timestamp,
summary: {
tests_run: ($tests_run | tonumber),
tests_passed: ($tests_passed | tonumber),
tests_failed: ($tests_failed | tonumber),
success_rate_percent: ($success_rate | tonumber)
},
failed_tests: $failed_tests,
detailed_results: $test_details
}' > "$report_file"
log_info "Detailed test report saved to: $report_file"
fi
}
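# NOTE: run_integration_tests and run_performance_tests are defined a second
# time below; bash keeps the most recent definition, so the versions earlier
# in this file are shadowed when main() runs.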
# Integration tests (if requested)
run_integration_tests() {
log_info "Running integration tests..."
# Note: These would require actual Plex installation
# For now, we'll just indicate what would be tested
log_warn "Integration tests require running Plex Media Server"
log_warn "These tests would cover:"
log_warn " - Service stop/start functionality"
log_warn " - Database integrity checks"
log_warn " - Full backup and restore cycles"
log_warn " - Performance under load"
}
# Performance benchmarks
run_performance_tests() {
log_info "Running performance benchmarks..."
local start_time=$(date +%s)
# Create large test files
local large_file="$TEST_DIR/large_test.db"
dd if=/dev/zero of="$large_file" bs=1M count=100 2>/dev/null
# Benchmark checksum calculation
local checksum_start=$(date +%s)
md5sum "$large_file" > /dev/null
local checksum_end=$(date +%s)
local checksum_time=$((checksum_end - checksum_start))
# Benchmark compression
local compress_start=$(date +%s)
tar -czf "$TEST_DIR/large_test.tar.gz" -C "$TEST_DIR" "large_test.db"
local compress_end=$(date +%s)
local compress_time=$((compress_end - compress_start))
local total_time=$(($(date +%s) - start_time))
log_info "Performance Results:"
log_info " Checksum (100MB): ${checksum_time}s"
log_info " Compression (100MB): ${compress_time}s"
log_info " Total benchmark time: ${total_time}s"
}
# Main execution
main() {
case "${1:-all}" in
"unit")
run_all_tests
;;
"integration")
run_integration_tests
;;
"performance")
run_performance_tests
;;
"all")
run_all_tests
# Uncomment for integration tests if environment supports it
# run_integration_tests
run_performance_tests
;;
*)
echo "Usage: $0 [unit|integration|performance|all]"
echo " unit - Run unit tests only"
echo " integration - Run integration tests (requires Plex)"
echo " performance - Run performance benchmarks"
echo " all - Run all available tests"
exit 1
;;
esac
generate_test_report
# Exit with appropriate code
if [ $TESTS_FAILED -gt 0 ]; then
exit 1
else
exit 0
fi
}
# Trap to ensure cleanup on exit
trap cleanup_test_environment EXIT
main "$@"

View File

@@ -110,7 +110,8 @@ log_info() {
# Log synchronization functions
sync_logs_to_shared() {
local sync_start_time=$(date +%s)
local sync_start_time
sync_start_time=$(date +%s)
log_info "Starting log synchronization to shared location"
# Ensure shared log directory exists
@@ -131,7 +132,8 @@ sync_logs_to_shared() {
for log_file in "$LOCAL_LOG_ROOT"/*.log; do
if [ -f "$log_file" ]; then
local filename=$(basename "$log_file")
local filename
filename=$(basename "$log_file")
local shared_file="$SHARED_LOG_ROOT/$filename"
# Only copy if file doesn't exist in shared location or local is newer
@@ -147,7 +149,8 @@ sync_logs_to_shared() {
fi
done
local sync_end_time=$(date +%s)
local sync_end_time
sync_end_time=$(date +%s)
local sync_duration=$((sync_end_time - sync_start_time))
if [ $error_count -eq 0 ]; then
@@ -161,7 +164,8 @@ sync_logs_to_shared() {
# Cleanup old local logs (30 day retention)
cleanup_old_local_logs() {
local cleanup_start_time=$(date +%s)
local cleanup_start_time
cleanup_start_time=$(date +%s)
log_info "Starting cleanup of old local logs (30+ days)"
if [ ! -d "$LOCAL_LOG_ROOT" ]; then
@@ -174,7 +178,8 @@ cleanup_old_local_logs() {
# Find and remove log files older than 30 days
while IFS= read -r -d '' old_file; do
local filename=$(basename "$old_file")
local filename
filename=$(basename "$old_file")
if rm "$old_file" 2>/dev/null; then
((cleanup_count++))
log_info "Removed old log: $filename"
@@ -184,7 +189,8 @@ cleanup_old_local_logs() {
fi
done < <(find "$LOCAL_LOG_ROOT" -name "*.log" -mtime +30 -print0 2>/dev/null)
local cleanup_end_time=$(date +%s)
local cleanup_end_time
cleanup_end_time=$(date +%s)
local cleanup_duration=$((cleanup_end_time - cleanup_start_time))
if [ $cleanup_count -gt 0 ]; then
@@ -235,7 +241,8 @@ validate_backup_structure() {
return 1
fi
local backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
local backup_count
backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
log_info "Found $backup_count backup files"
if [ "$backup_count" -eq 0 ]; then
@@ -249,7 +256,8 @@ validate_backup_structure() {
# Validate individual backup
validate_backup() {
local backup_file="$1"
local backup_name=$(basename "$backup_file")
local backup_name
backup_name=$(basename "$backup_file")
local errors=0
log_info "Validating backup: $backup_name"
@@ -268,7 +276,8 @@ validate_backup() {
log_success "Archive integrity check passed: $backup_name"
# Check for expected files in archive
local archive_contents=$(tar -tzf "$backup_file" 2>/dev/null)
local archive_contents
archive_contents=$(tar -tzf "$backup_file" 2>/dev/null)
# Check if this is a legacy backup with dated subdirectory
local has_dated_subdir=false
@@ -300,8 +309,11 @@ validate_backup() {
log_success " Found: $file"
else
# Check if this is an optional file that might not exist in older backups
local backup_name=$(basename "$backup_file")
local backup_datetime=$(echo "$backup_name" | sed 's/plex-backup-\([0-9]\{8\}_[0-9]\{6\}\)\.tar\.gz/\1/')
local backup_name
backup_name=$(basename "$backup_file")
local backup_datetime
backup_datetime=${backup_name#plex-backup-}
backup_datetime=${backup_datetime%.tar.gz}
if [[ -n "${OPTIONAL_FILES[$file]}" ]] && [[ "$backup_datetime" < "${OPTIONAL_FILES[$file]}" ]]; then
log_warning " Missing file (expected for backup date): $file"
@@ -327,7 +339,7 @@ validate_backup() {
# Legacy format: extract filename from dated subdirectory
filename="${BASH_REMATCH[1]}"
# Remove timestamp suffix if present
filename=$(echo "$filename" | sed 's/\.[0-9]\{8\}_[0-9]\{6\}$//')
filename=${filename%.*_*}
elif [[ "$line" =~ ^\./(.+)$ ]]; then
# New format: extract filename from ./ prefix
filename="${BASH_REMATCH[1]}"
@@ -365,16 +377,20 @@ validate_backup() {
check_backup_freshness() {
log_info "Checking backup freshness..."
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
local latest_backup
latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
if [ -z "$latest_backup" ]; then
log_error "No backups found"
return 1
fi
local backup_filename=$(basename "$latest_backup")
local backup_filename
backup_filename=$(basename "$latest_backup")
# Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
local backup_datetime=$(echo "$backup_filename" | sed 's/plex-backup-\([0-9]\{8\}_[0-9]\{6\}\)\.tar\.gz/\1/')
local backup_datetime
backup_datetime=${backup_filename#plex-backup-}
backup_datetime=${backup_datetime%.tar.gz}
# Validate that we extracted a valid datetime
if [[ ! "$backup_datetime" =~ ^[0-9]{8}_[0-9]{6}$ ]]; then
@@ -390,7 +406,8 @@ check_backup_freshness() {
return 1
fi
local current_timestamp=$(date +%s)
local current_timestamp
current_timestamp=$(date +%s)
local age_days=$(( (current_timestamp - backup_timestamp) / 86400 ))
log_info "Latest backup: $backup_datetime ($age_days days old)"
@@ -427,7 +444,8 @@ check_backup_sizes() {
while IFS= read -r backup_file; do
if [ -f "$backup_file" ] && [ -r "$backup_file" ]; then
backup_files+=("$backup_file")
local size=$(stat -f%z "$backup_file" 2>/dev/null || stat -c%s "$backup_file" 2>/dev/null || echo "0")
local size
size=$(stat -f%z "$backup_file" 2>/dev/null || stat -c%s "$backup_file" 2>/dev/null || echo "0")
backup_sizes+=("$size")
total_size=$((total_size + size))
fi
@@ -440,8 +458,10 @@ check_backup_sizes() {
# Calculate average size
local avg_size=$((total_size / ${#backup_files[@]}))
local human_total=$(numfmt --to=iec "$total_size" 2>/dev/null || echo "${total_size} bytes")
local human_avg=$(numfmt --to=iec "$avg_size" 2>/dev/null || echo "${avg_size} bytes")
local human_total
human_total=$(numfmt --to=iec "$total_size" 2>/dev/null || echo "${total_size} bytes")
local human_avg
human_avg=$(numfmt --to=iec "$avg_size" 2>/dev/null || echo "${avg_size} bytes")
log_info "Total backup size: $human_total"
log_info "Average backup size: $human_avg"
@@ -453,13 +473,17 @@ check_backup_sizes() {
for i in "${!backup_files[@]}"; do
local file="${backup_files[$i]}"
local size="${backup_sizes[$i]}"
local filename=$(basename "$file")
local filename
filename=$(basename "$file")
if [ "$size" -lt "$min_size" ] && [ "$size" -gt 0 ]; then
local human_size=$(numfmt --to=iec "$size" 2>/dev/null || echo "${size} bytes")
local human_size
human_size=$(numfmt --to=iec "$size" 2>/dev/null || echo "${size} bytes")
# Extract backup datetime to check if it's a pre-blobs backup
local backup_datetime=$(echo "$filename" | sed 's/plex-backup-\([0-9]\{8\}_[0-9]\{6\}\)\.tar\.gz/\1/')
local backup_datetime
backup_datetime=${filename#plex-backup-}
backup_datetime=${backup_datetime%.tar.gz}
if [[ "$backup_datetime" =~ ^[0-9]{8}_[0-9]{6}$ ]] && [[ "$backup_datetime" < "20250526_144500" ]]; then
log_info "Small backup (pre-blobs DB): $filename ($human_size)"
log_info " This backup predates the blobs database introduction, size is expected"
@@ -484,9 +508,12 @@ check_backup_sizes() {
check_disk_space() {
log_info "Checking disk space..."
local backup_disk_usage=$(du -sh "$BACKUP_ROOT" | cut -f1)
local available_space=$(df -h "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
local used_percentage=$(df "$BACKUP_ROOT" | awk 'NR==2 {print $5}' | sed 's/%//')
local backup_disk_usage
backup_disk_usage=$(du -sh "$BACKUP_ROOT" | cut -f1)
local available_space
available_space=$(df -h "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
local used_percentage
used_percentage=$(df "$BACKUP_ROOT" | awk 'NR==2 {print $5}' | sed 's/%//')
log_info "Backup disk usage: $backup_disk_usage"
log_info "Available space: $available_space"
@@ -513,10 +540,12 @@ generate_report() {
local total_errors=0
# Header
echo "==================================" >> "$REPORT_FILE"
echo "Plex Backup Validation Report" >> "$REPORT_FILE"
echo "Generated: $(date)" >> "$REPORT_FILE"
echo "==================================" >> "$REPORT_FILE"
{
echo "=================================="
echo "Plex Backup Validation Report"
echo "Generated: $(date)"
echo "=================================="
} >> "$REPORT_FILE"
# Use process substitution to avoid subshell variable scope issues
while IFS= read -r backup_file; do
@@ -532,11 +561,13 @@ generate_report() {
done < <(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort)
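The process substitution is what keeps the counters alive: piping find into while would run the loop in a subshell and discard every increment. A minimal demonstration:
count=0
printf 'a\nb\n' | while read -r _; do count=$((count+1)); done
echo "$count"   # 0: the pipeline ran the loop in a subshell
count=0
while read -r _; do count=$((count+1)); done < <(printf 'a\nb\n')
echo "$count"   # 2: the loop ran in the current shell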
# Summary
echo >> "$REPORT_FILE"
echo "Summary:" >> "$REPORT_FILE"
echo " Total backups: $total_backups" >> "$REPORT_FILE"
echo " Valid backups: $valid_backups" >> "$REPORT_FILE"
echo " Total errors: $total_errors" >> "$REPORT_FILE"
{
echo
echo "Summary:"
echo " Total backups: $total_backups"
echo " Valid backups: $valid_backups"
echo " Total errors: $total_errors"
} >> "$REPORT_FILE"
log_success "Report generated: $REPORT_FILE"
}
@@ -546,7 +577,8 @@ fix_issues() {
log_info "Attempting to fix common issues..."
# Create corrupted backups directory
local corrupted_dir="$(dirname "$REPORT_FILE")/corrupted-backups"
local corrupted_dir
corrupted_dir="$(dirname "$REPORT_FILE")/corrupted-backups"
mkdir -p "$corrupted_dir"
# Check for and move corrupted backup files using process substitution
@@ -554,7 +586,8 @@ fix_issues() {
while IFS= read -r backup_file; do
if ! tar -tzf "$backup_file" >/dev/null 2>&1; then
log_warning "Found corrupted backup: $(basename "$backup_file")"
local backup_name=$(basename "$backup_file")
local backup_name
backup_name=$(basename "$backup_file")
local corrupted_backup="$corrupted_dir/$backup_name"
if mv "$backup_file" "$corrupted_backup"; then
@@ -650,9 +683,9 @@ main() {
# Check backup freshness
if ! check_backup_freshness; then
# Check if this is a "no backups found" error vs "old backup" warning
local backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | wc -l)
local backup_count
backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | wc -l)
if [ "$backup_count" -eq 0 ]; then
# No backups found - critical error
critical_errors=$((critical_errors + 1))

View File

@@ -82,11 +82,13 @@ check_service_status() {
print_status "$GREEN" "✓ Plex Media Server is running"
# Get service uptime
local uptime=$(systemctl show plexmediaserver --property=ActiveEnterTimestamp --value)
local uptime
uptime=$(systemctl show plexmediaserver --property=ActiveEnterTimestamp --value)
print_status "$GREEN" " Started: $uptime"
# Get memory usage
local memory=$(systemctl show plexmediaserver --property=MemoryCurrent --value)
local memory
memory=$(systemctl show plexmediaserver --property=MemoryCurrent --value)
if [[ -n "$memory" && "$memory" != "[not set]" ]]; then
local memory_mb=$((memory / 1024 / 1024))
print_status "$GREEN" " Memory usage: ${memory_mb}MB"
@@ -109,12 +111,14 @@ check_database_integrity() {
# Check main database
if [[ -f "$main_db" ]]; then
local main_size=$(du -h "$main_db" | cut -f1)
local main_size
main_size=$(du -h "$main_db" | cut -f1)
print_status "$GREEN" "✓ Main database exists (${main_size})"
# Try basic database operations
if sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" >/dev/null 2>&1; then
local table_count=$(sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null)
local table_count
table_count=$(sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null)
print_status "$GREEN" " Contains $table_count tables"
else
print_status "$YELLOW" " Warning: Cannot query database tables"
@@ -127,13 +131,15 @@ check_database_integrity() {
# Check blobs database
if [[ -f "$blobs_db" ]]; then
local blobs_size=$(du -h "$blobs_db" | cut -f1)
local blobs_size
blobs_size=$(du -h "$blobs_db" | cut -f1)
print_status "$GREEN" "✓ Blobs database exists (${blobs_size})"
# Check if it's not empty (previous corruption was 0 bytes)
local blobs_bytes=$(stat -c%s "$blobs_db" 2>/dev/null || stat -f%z "$blobs_db" 2>/dev/null)
local blobs_bytes
blobs_bytes=$(stat -c%s "$blobs_db" 2>/dev/null || stat -f%z "$blobs_db" 2>/dev/null)
if [[ $blobs_bytes -gt 1000000 ]]; then
print_status "$GREEN" " File size is healthy ($(numfmt --to=iec $blobs_bytes))"
print_status "$GREEN" " File size is healthy ($(numfmt --to=iec "$blobs_bytes"))"
else
print_status "$RED" " Warning: File size is too small ($blobs_bytes bytes)"
all_good=false
@@ -144,8 +150,10 @@ check_database_integrity() {
fi
# Check file ownership
local main_owner=$(stat -c%U:%G "$main_db" 2>/dev/null)
local blobs_owner=$(stat -c%U:%G "$blobs_db" 2>/dev/null)
local main_owner
main_owner=$(stat -c%U:%G "$main_db" 2>/dev/null)
local blobs_owner
blobs_owner=$(stat -c%U:%G "$blobs_db" 2>/dev/null)
if [[ "$main_owner" == "plex:plex" && "$blobs_owner" == "plex:plex" ]]; then
print_status "$GREEN" "✓ Database ownership is correct (plex:plex)"
@@ -154,7 +162,11 @@ check_database_integrity() {
print_status "$YELLOW" " Main DB: $main_owner, Blobs DB: $blobs_owner"
fi
return $([[ "$all_good" == "true" ]] && echo 0 || echo 1)
if [[ "$all_good" == "true" ]]; then
return 0
else
return 1
fi
}
# Check web interface
@@ -185,7 +197,8 @@ check_api_functionality() {
print_header "API FUNCTIONALITY CHECK"
# Test root API endpoint
local api_response=$(curl -s "http://localhost:32400/" 2>/dev/null)
local api_response
api_response=$(curl -s "http://localhost:32400/" 2>/dev/null)
if echo "$api_response" | grep -q "Unauthorized\|web/index.html"; then
print_status "$GREEN" "✓ API is responding (redirect to web interface)"
@@ -194,7 +207,8 @@ check_api_functionality() {
fi
# Try to get server identity (this might work without auth)
local identity_response=$(curl -s "http://localhost:32400/identity" 2>/dev/null)
local identity_response
identity_response=$(curl -s "http://localhost:32400/identity" 2>/dev/null)
if echo "$identity_response" | grep -q "MediaContainer"; then
print_status "$GREEN" "✓ Server identity endpoint working"

View File

@@ -0,0 +1,272 @@
#!/bin/bash
################################################################################
# Plex Recovery Validation Script
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Comprehensive validation script that verifies the success of
# Plex database recovery operations. Performs extensive checks
# on database integrity, service functionality, and system health
# to ensure complete recovery and operational readiness.
#
# Features:
# - Database integrity verification
# - Service functionality testing
# - Library accessibility checks
# - Performance validation
# - Web interface connectivity testing
# - Comprehensive recovery reporting
# - Post-recovery optimization suggestions
#
# Related Scripts:
# - recover-plex-database.sh: Primary recovery script validated by this tool
# - icu-aware-recovery.sh: ICU recovery validation
# - nuclear-plex-recovery.sh: Nuclear recovery validation
# - backup-plex.sh: Backup system that enables recovery
# - validate-plex-backups.sh: Backup validation tools
# - plex.sh: General Plex service management
#
# Usage:
# ./validate-plex-recovery.sh # Full validation suite
# ./validate-plex-recovery.sh --quick # Quick validation checks
# ./validate-plex-recovery.sh --detailed # Detailed analysis and reporting
# ./validate-plex-recovery.sh --performance # Performance validation only
#
# Dependencies:
# - sqlite3 or Plex SQLite binary
# - curl (for web interface testing)
# - systemctl (for service status checks)
# - Plex Media Server
#
# Exit Codes:
# 0 - Recovery validation successful
# 1 - General error
# 2 - Database validation failures
# 3 - Service functionality issues
# 4 - Performance concerns detected
# 5 - Partial recovery (requires attention)
#
################################################################################
# Final Plex Recovery Validation Script
# Comprehensive check to ensure Plex is fully recovered and functional
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
print_status() {
local color="$1"
local message="$2"
echo -e "${color}${message}${NC}"
}
print_header() {
echo
print_status "$BLUE" "================================"
print_status "$BLUE" "$1"
print_status "$BLUE" "================================"
}
# Check service status
check_service_status() {
print_header "SERVICE STATUS CHECK"
if systemctl is-active --quiet plexmediaserver; then
print_status "$GREEN" "✓ Plex Media Server is running"
# Get service uptime
local uptime=$(systemctl show plexmediaserver --property=ActiveEnterTimestamp --value)
print_status "$GREEN" " Started: $uptime"
# Get memory usage
local memory=$(systemctl show plexmediaserver --property=MemoryCurrent --value)
if [[ -n "$memory" && "$memory" != "[not set]" ]]; then
local memory_mb=$((memory / 1024 / 1024))
print_status "$GREEN" " Memory usage: ${memory_mb}MB"
fi
return 0
else
print_status "$RED" "✗ Plex Media Server is not running"
return 1
fi
}
# Check database integrity
check_database_integrity() {
print_header "DATABASE INTEGRITY CHECK"
local main_db="${PLEX_DB_DIR}/com.plexapp.plugins.library.db"
local blobs_db="${PLEX_DB_DIR}/com.plexapp.plugins.library.blobs.db"
local all_good=true
# Check main database
if [[ -f "$main_db" ]]; then
local main_size=$(du -h "$main_db" | cut -f1)
print_status "$GREEN" "✓ Main database exists (${main_size})"
# Try basic database operations
if sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" >/dev/null 2>&1; then
local table_count=$(sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null)
print_status "$GREEN" " Contains $table_count tables"
else
print_status "$YELLOW" " Warning: Cannot query database tables"
all_good=false
fi
else
print_status "$RED" "✗ Main database missing"
all_good=false
fi
# Check blobs database
if [[ -f "$blobs_db" ]]; then
local blobs_size=$(du -h "$blobs_db" | cut -f1)
print_status "$GREEN" "✓ Blobs database exists (${blobs_size})"
# Check if it's not empty (previous corruption was 0 bytes)
local blobs_bytes=$(stat -c%s "$blobs_db" 2>/dev/null || stat -f%z "$blobs_db" 2>/dev/null)
if [[ $blobs_bytes -gt 1000000 ]]; then
print_status "$GREEN" " File size is healthy ($(numfmt --to=iec $blobs_bytes))"
else
print_status "$RED" " Warning: File size is too small ($blobs_bytes bytes)"
all_good=false
fi
else
print_status "$RED" "✗ Blobs database missing"
all_good=false
fi
# Check file ownership
local main_owner=$(stat -c%U:%G "$main_db" 2>/dev/null)
local blobs_owner=$(stat -c%U:%G "$blobs_db" 2>/dev/null)
if [[ "$main_owner" == "plex:plex" && "$blobs_owner" == "plex:plex" ]]; then
print_status "$GREEN" "✓ Database ownership is correct (plex:plex)"
else
print_status "$YELLOW" " Warning: Ownership issues detected"
print_status "$YELLOW" " Main DB: $main_owner, Blobs DB: $blobs_owner"
fi
return $([[ "$all_good" == "true" ]] && echo 0 || echo 1)
}
# Check web interface
check_web_interface() {
print_header "WEB INTERFACE CHECK"
local max_attempts=5
local attempt=1
while [[ $attempt -le $max_attempts ]]; do
if curl -s -o /dev/null -w "%{http_code}" "http://localhost:32400/web/index.html" | grep -q "200"; then
print_status "$GREEN" "✓ Web interface is accessible"
print_status "$GREEN" " URL: http://localhost:32400"
return 0
fi
print_status "$YELLOW" " Attempt $attempt/$max_attempts: Web interface not ready..."
sleep 2
((attempt++))
done
print_status "$RED" "✗ Web interface is not accessible"
return 1
}
# Check API functionality
check_api_functionality() {
print_header "API FUNCTIONALITY CHECK"
# Test root API endpoint
local api_response=$(curl -s "http://localhost:32400/" 2>/dev/null)
if echo "$api_response" | grep -q "Unauthorized\|web/index.html"; then
print_status "$GREEN" "✓ API is responding (redirect to web interface)"
else
print_status "$YELLOW" " Warning: Unexpected API response"
fi
# Try to get server identity (this might work without auth)
local identity_response=$(curl -s "http://localhost:32400/identity" 2>/dev/null)
if echo "$identity_response" | grep -q "MediaContainer"; then
print_status "$GREEN" "✓ Server identity endpoint working"
else
print_status "$YELLOW" " Note: Server identity requires authentication"
fi
}
# Check recent logs for errors
check_recent_logs() {
print_header "RECENT LOGS CHECK"
# Check for recent errors in systemd logs
local recent_errors=$(sudo journalctl -u plexmediaserver --since "5 minutes ago" --no-pager -q 2>/dev/null | grep -i "error\|fail\|exception" | head -3)
if [[ -z "$recent_errors" ]]; then
print_status "$GREEN" "✓ No recent errors in service logs"
else
print_status "$YELLOW" " Recent log entries found:"
echo "$recent_errors" | while read -r line; do
print_status "$YELLOW" " $line"
done
fi
}
# Show recovery summary
show_recovery_summary() {
print_header "RECOVERY SUMMARY"
local corrupted_backup_dir="${PLEX_DB_DIR}/corrupted-20250605_060232"
if [[ -d "$corrupted_backup_dir" ]]; then
print_status "$GREEN" "✓ Corrupted databases backed up to:"
print_status "$GREEN" " $corrupted_backup_dir"
fi
print_status "$GREEN" "✓ Databases restored from: 2025-06-02 backups"
print_status "$GREEN" "✓ File ownership corrected to plex:plex"
print_status "$GREEN" "✓ Service restarted successfully"
echo
print_status "$BLUE" "NEXT STEPS:"
print_status "$YELLOW" "1. Access Plex at: http://localhost:32400"
print_status "$YELLOW" "2. Verify your libraries are intact"
print_status "$YELLOW" "3. Consider running a library scan to pick up recent changes"
print_status "$YELLOW" "4. Monitor the service for a few days to ensure stability"
}
# Main function
main() {
print_status "$BLUE" "PLEX RECOVERY VALIDATION"
print_status "$BLUE" "$(date)"
echo
local overall_status=0
check_service_status || overall_status=1
check_database_integrity || overall_status=1
check_web_interface || overall_status=1
check_api_functionality
check_recent_logs
show_recovery_summary
echo
if [[ $overall_status -eq 0 ]]; then
print_status "$GREEN" "🎉 RECOVERY SUCCESSFUL! Plex Media Server is fully functional."
else
print_status "$YELLOW" "⚠️ RECOVERY PARTIALLY SUCCESSFUL - Some issues detected."
print_status "$YELLOW" " Plex is running but may need additional attention."
fi
return $overall_status
}
# Run the validation
main "$@"