mirror of
https://github.com/acedanger/shell.git
synced 2025-12-06 02:20:11 -08:00
1155 lines
43 KiB
Bash
Executable File
1155 lines
43 KiB
Bash
Executable File
#!/bin/bash

# Plex Media Server backup script: stops Plex, backs up its SQLite databases
# and Preferences.xml with checksum verification, optionally repairs corrupted
# databases (DBRepair-style), then restarts the service and archives the copies.

set -e

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Performance tracking variables (epoch seconds; filled in as phases run)
SCRIPT_START_TIME=$(date +%s)
BACKUP_START_TIME=""
VERIFICATION_START_TIME=""
SERVICE_STOP_TIME=""
SERVICE_START_TIME=""

# Configuration
MAX_BACKUP_AGE_DAYS=30   # prune archives older than this many days
MAX_BACKUPS_TO_KEEP=10   # keep at most this many recent archives
BACKUP_ROOT="/mnt/share/media/backups/plex"
LOG_ROOT="/mnt/share/media/backups/logs"
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
PERFORMANCE_LOG_FILE="${SCRIPT_DIR}/logs/plex-backup-performance.json"

# Backup strategy configuration - Always perform full backups

# Plex SQLite path (custom Plex SQLite binary)
PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"

# Script options (defaults; overridable via the CLI flags parsed below)
AUTO_REPAIR=false
INTEGRITY_CHECK_ONLY=false
INTERACTIVE_MODE=true
PARALLEL_VERIFICATION=true
PERFORMANCE_MONITORING=true
WEBHOOK_URL="https://notify.peterwood.rocks/lab"
EMAIL_RECIPIENT=""
|
# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --auto-repair)
            # Repair implies unattended operation: disable prompts.
            AUTO_REPAIR=true
            INTERACTIVE_MODE=false
            shift
            ;;
        --check-integrity)
            INTEGRITY_CHECK_ONLY=true
            shift
            ;;
        --non-interactive)
            INTERACTIVE_MODE=false
            shift
            ;;
        --no-parallel)
            PARALLEL_VERIFICATION=false
            shift
            ;;
        --no-performance)
            PERFORMANCE_MONITORING=false
            shift
            ;;
        --webhook=*)
            # Value is everything after the first '='.
            WEBHOOK_URL="${1#*=}"
            shift
            ;;
        --email=*)
            EMAIL_RECIPIENT="${1#*=}"
            shift
            ;;
        -h|--help)
            echo "Usage: $0 [OPTIONS]"
            echo "Options:"
            echo " --auto-repair Automatically attempt to repair corrupted databases"
            echo " --check-integrity Only check database integrity, don't backup"
            echo " --non-interactive Run in non-interactive mode (for automation)"
            echo " --no-parallel Disable parallel verification (slower but safer)"
            echo " --no-performance Disable performance monitoring"
            echo " --webhook=URL Send notifications to webhook URL"
            echo " --email=ADDRESS Send notifications to email address"
            echo " -h, --help Show this help message"
            echo ""
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            exit 1
            ;;
    esac
done

# Create logs directory
mkdir -p "${SCRIPT_DIR}/logs"

# Define Plex files and their nicknames (nickname -> absolute source path)
declare -A PLEX_FILES=(
    ["database"]="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db"
    ["blobs"]="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db"
    ["preferences"]="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Preferences.xml"
)
|
|
|
|
# Logging functions

# Print a timestamped line to the console (cyan) and append a plain copy to
# the daily log file; logging-to-file failures are silently ignored.
log_message() {
    local msg="$1"
    local stamp logfile
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    logfile="${LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log"
    printf '%b\n' "${CYAN}[${stamp}]${NC} ${msg}"
    echo "[${stamp}] $msg" >> "$logfile" 2>/dev/null || true
}
|
|
|
|
# Print a timestamped ERROR line to the console (red) and append a plain
# copy to the daily log file; file-append failures are ignored.
log_error() {
    local msg="$1"
    local stamp logfile
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    logfile="${LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log"
    printf '%b\n' "${RED}[${stamp}] ERROR:${NC} ${msg}"
    echo "[${stamp}] ERROR: $msg" >> "$logfile" 2>/dev/null || true
}
|
|
|
|
# Print a timestamped SUCCESS line to the console (green) and append a plain
# copy to the daily log file; file-append failures are ignored.
log_success() {
    local msg="$1"
    local stamp logfile
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    logfile="${LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log"
    printf '%b\n' "${GREEN}[${stamp}] SUCCESS:${NC} ${msg}"
    echo "[${stamp}] SUCCESS: $msg" >> "$logfile" 2>/dev/null || true
}
|
|
|
|
# Print a timestamped WARNING line to the console (yellow) and append a
# plain copy to the daily log file; file-append failures are ignored.
log_warning() {
    local msg="$1"
    local stamp logfile
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    logfile="${LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log"
    printf '%b\n' "${YELLOW}[${stamp}] WARNING:${NC} ${msg}"
    echo "[${stamp}] WARNING: $msg" >> "$logfile" 2>/dev/null || true
}
|
|
|
|
# Print a timestamped INFO line to the console (blue) and append a plain
# copy to the daily log file; file-append failures are ignored.
log_info() {
    local msg="$1"
    local stamp logfile
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    logfile="${LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log"
    printf '%b\n' "${BLUE}[${stamp}] INFO:${NC} ${msg}"
    echo "[${stamp}] INFO: $msg" >> "$logfile" 2>/dev/null || true
}
|
|
|
|
# Performance tracking functions

# Record how long an operation took in the JSON performance log and log it.
# Globals:   PERFORMANCE_MONITORING (read), PERFORMANCE_LOG_FILE (read/write)
# Arguments: $1 - operation name
#            $2 - start time (epoch seconds)
#            $3 - end time (epoch seconds; defaults to now)
# Returns:   0 always — monitoring problems must never abort the backup.
track_performance() {
    if [ "$PERFORMANCE_MONITORING" != true ]; then
        return 0
    fi

    local operation="$1"
    local start_time="$2"
    local end_time="${3:-$(date +%s)}"
    local duration=$((end_time - start_time))

    # jq is required for JSON handling. Degrade gracefully when it is
    # missing: the original code would fail here and, under `set -e`,
    # kill the whole backup run.
    if ! command -v jq > /dev/null 2>&1; then
        log_warning "jq not found; skipping performance tracking for $operation"
        return 0
    fi

    # Initialize performance log if it doesn't exist
    if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then
        mkdir -p "$(dirname "$PERFORMANCE_LOG_FILE")"
        echo "[]" > "$PERFORMANCE_LOG_FILE"
    fi

    # Build the entry. Declaration is split from assignment so jq's exit
    # status is not masked by `local`.
    local entry
    entry=$(jq -n \
        --arg operation "$operation" \
        --arg duration "$duration" \
        --arg timestamp "$(date -Iseconds)" \
        '{
            operation: $operation,
            duration_seconds: ($duration | tonumber),
            timestamp: $timestamp
        }') || { log_warning "Failed to build performance entry"; return 0; }

    # Append via a temp file so a failed jq run cannot truncate the log;
    # the trailing || keeps a failure from tripping `set -e`.
    { jq --argjson entry "$entry" '. += [$entry]' "$PERFORMANCE_LOG_FILE" > "${PERFORMANCE_LOG_FILE}.tmp" && \
        mv "${PERFORMANCE_LOG_FILE}.tmp" "$PERFORMANCE_LOG_FILE"; } || \
        log_warning "Failed to update performance log"

    log_info "Performance: $operation completed in ${duration}s"
}
|
|
|
|
# Initialize log directory

# Ensure the directory holding the performance log exists and seed the log
# with an empty JSON array on first run.
initialize_logs() {
    local log_dir
    log_dir=$(dirname "$PERFORMANCE_LOG_FILE")
    mkdir -p "$log_dir"
    if [ -f "$PERFORMANCE_LOG_FILE" ]; then
        return 0
    fi
    printf '[]\n' > "$PERFORMANCE_LOG_FILE"
    log_message "Initialized performance log file"
}
|
|
|
|
# Enhanced notification system

# Emit a notification through every configured channel: console (via the
# log_* helpers), an ntfy-style webhook POST (when WEBHOOK_URL is set), and
# email via sendmail (when EMAIL_RECIPIENT is set and sendmail exists).
# Arguments: $1 - title
#            $2 - message body
#            $3 - status: success|error|warning|info (default "info")
# Delivery failures are logged (or ignored) and never abort the script.
send_notification() {
    local title="$1"
    local message="$2"
    local status="${3:-info}" # success, error, warning, info
    local hostname=$(hostname)

    # Console notification
    case "$status" in
        success) log_success "$title: $message" ;;
        error) log_error "$title: $message" ;;
        warning) log_warning "$title: $message" ;;
        *) log_info "$title: $message" ;;
    esac

    # Webhook notification
    if [ -n "$WEBHOOK_URL" ]; then
        # Tags drive routing/filtering on the notification server.
        local tags="backup,plex,${hostname}"
        [ "$status" == "error" ] && tags="${tags},errors"
        [ "$status" == "warning" ] && tags="${tags},warnings"

        # Enhanced message with additional context
        local enhanced_message="$message\n\nHost: $hostname\nTimestamp: $(date '+%Y-%m-%d %H:%M:%S')"

        curl -s \
            -H "tags:${tags}" \
            -d "$enhanced_message" \
            "$WEBHOOK_URL" 2>/dev/null || log_warning "Failed to send webhook notification"
    fi

    # Email notification (if sendmail is available)
    if [ -n "$EMAIL_RECIPIENT" ] && command -v sendmail > /dev/null 2>&1; then
        {
            echo "To: $EMAIL_RECIPIENT"
            echo "Subject: Plex Backup - $title"
            echo "Content-Type: text/plain"
            echo ""
            echo "Host: $hostname"
            echo "Time: $(date)"
            echo "Status: $status"
            echo ""
            echo "$message"
        } | sendmail "$EMAIL_RECIPIENT" 2>/dev/null || true
    fi
}
|
|
|
|
# Enhanced checksum calculation with caching

# Compute (and cache) the md5 checksum of a file.
# A sidecar "<file>.md5" is reused when it is strictly newer than the file.
# Falls back to sudo when the file is unreadable by the current user.
# Outputs the 32-hex-digit checksum on success; prints PERMISSION_DENIED
# and returns 1 when both direct and sudo reads fail.
calculate_checksum() {
    local target="$1"
    local sidecar="${target}.md5"
    local target_mtime
    target_mtime=$(stat -c %Y "$target" 2>/dev/null || echo "0")

    # Fast path: reuse the cached checksum when it is fresher than the file
    # and looks like a valid md5 digest.
    if [ -f "$sidecar" ]; then
        local sidecar_mtime
        sidecar_mtime=$(stat -c %Y "$sidecar" 2>/dev/null || echo "0")
        if [ "$sidecar_mtime" -gt "$target_mtime" ]; then
            local cached
            cached=$(cat "$sidecar" 2>/dev/null)
            if [[ -n "$cached" && "$cached" =~ ^[a-f0-9]{32}$ ]]; then
                echo "$cached"
                return 0
            fi
        fi
    fi

    # First attempt: read the file directly.
    local digest
    digest=$(md5sum "$target" 2>/dev/null | cut -d' ' -f1)
    if [[ -n "$digest" && "$digest" =~ ^[a-f0-9]{32}$ ]]; then
        # Cache for next time; caching is best-effort.
        echo "$digest" > "$sidecar" 2>/dev/null || true
        echo "$digest"
        return 0
    fi

    # Second attempt: elevate with sudo (Plex's files are typically not
    # readable by the invoking user).
    digest=$(sudo md5sum "$target" 2>/dev/null | cut -d' ' -f1)
    if [[ -n "$digest" && "$digest" =~ ^[a-f0-9]{32}$ ]]; then
        sudo bash -c "echo '$digest' > '$sidecar'" 2>/dev/null || true
        echo "$digest"
        return 0
    fi

    # Both attempts failed: signal the caller with a sentinel value.
    echo "PERMISSION_DENIED"
    return 1
}
|
|
|
|
# Check database integrity using Plex SQLite

# Run "PRAGMA integrity_check" against a database with Plex's bundled
# SQLite binary.
# Arguments: $1 - path to the .db file
# Returns:   0 when the check reports "ok"; 1 when the binary is missing,
#            the check cannot run, or issues are reported.
check_database_integrity() {
    local db_file="$1"
    local db_name=$(basename "$db_file")

    log_message "Checking database integrity: $db_name"

    # Check if Plex SQLite exists
    if [ ! -f "$PLEX_SQLITE" ]; then
        log_error "Plex SQLite binary not found at: $PLEX_SQLITE"
        return 1
    fi

    # Make Plex SQLite executable if it isn't already
    sudo chmod +x "$PLEX_SQLITE" 2>/dev/null || true

    # Run integrity check (sudo: Plex database files are not world-readable)
    local integrity_result
    integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1)
    local check_exit_code=$?

    if [ $check_exit_code -ne 0 ]; then
        log_error "Failed to run integrity check on $db_name: $integrity_result"
        return 1
    fi

    # A healthy database prints exactly "ok" on a line by itself.
    if echo "$integrity_result" | grep -q "^ok$"; then
        log_success "Database integrity check passed: $db_name"
        return 0
    else
        log_warning "Database integrity issues detected in $db_name:"
        # The pipe runs this loop in a subshell; harmless here since the
        # loop only logs.
        echo "$integrity_result" | while read -r line; do
            log_warning " $line"
        done
        return 1
    fi
}
|
|
|
|
# Advanced database repair using <https://github.com/ChuckPa/DBRepair/> project methods

# Attempt to repair a corrupted Plex database:
#   Step 1: VACUUM in place, then REINDEX on success.
#   Fallback: .dump the database to SQL and rebuild a fresh database from it,
#   replacing the original.
# A pre-repair copy is always taken first; on any failure the original file
# is restored from that copy.
# Arguments: $1 - path to the .db file
# Returns:   0 on successful repair, 1 otherwise.
repair_database() {
    local db_file="$1"
    local db_name=$(basename "$db_file")
    local backup_file="${db_file}.pre-repair-backup"
    local timestamp=$(date "+%Y-%m-%d_%H.%M.%S")
    local db_dir=$(dirname "$db_file")
    local temp_dir="${db_dir}/repair-temp-${timestamp}"

    log_message "Starting advanced database repair for: $db_name"

    # Create temporary repair directory
    sudo mkdir -p "$temp_dir"

    # Create backup before repair
    if sudo cp "$db_file" "$backup_file"; then
        log_success "Created pre-repair backup: $(basename "$backup_file")"
    else
        log_error "Failed to create pre-repair backup"
        sudo rm -rf "$temp_dir" 2>/dev/null || true
        return 1
    fi

    # Step 1: Database cleanup (DBRepair method)
    log_message "Step 1: Database cleanup and optimization..."

    local vacuum_result
    vacuum_result=$(sudo "$PLEX_SQLITE" "$db_file" "VACUUM;" 2>&1)
    local vacuum_exit_code=$?

    if [ $vacuum_exit_code -ne 0 ]; then
        log_warning "VACUUM failed: $vacuum_result"
        log_message "Attempting dump/restore method..."

        # Step 2: Dump and restore (fallback method)
        local dump_file="${temp_dir}/${db_name}.sql"
        local new_db_file="${temp_dir}/${db_name}.new"

        log_message "Step 2: Dumping database to SQL..."
        # NOTE(review): the > redirection is performed by the invoking shell,
        # not by sudo — assumes temp_dir (created via sudo) is writable by
        # this user; confirm on a real run.
        if sudo "$PLEX_SQLITE" "$db_file" ".dump" > "$dump_file" 2>/dev/null; then
            log_success "Database dumped successfully"

            log_message "Step 3: Creating new database from dump..."
            if sudo "$PLEX_SQLITE" "$new_db_file" ".read $dump_file" 2>/dev/null; then
                log_success "New database created successfully"

                # Replace original with repaired version
                if sudo mv "$new_db_file" "$db_file"; then
                    log_success "Database replaced with repaired version"

                    # Set proper ownership (Plex runs as the plex user)
                    sudo chown plex:plex "$db_file"
                    sudo chmod 644 "$db_file"

                    # Cleanup
                    sudo rm -rf "$temp_dir"
                    return 0
                else
                    log_error "Failed to replace original database"
                fi
            else
                log_error "Failed to create new database from dump"
            fi
        else
            log_error "Failed to dump database"
        fi
    else
        log_success "Database VACUUM completed successfully"

        # Run reindex for good measure
        log_message "Running REINDEX..."
        local reindex_result
        reindex_result=$(sudo "$PLEX_SQLITE" "$db_file" "REINDEX;" 2>&1)
        local reindex_exit_code=$?

        if [ $reindex_exit_code -eq 0 ]; then
            log_success "Database REINDEX completed successfully"
            sudo rm -rf "$temp_dir"
            return 0
        else
            log_warning "REINDEX failed: $reindex_result"
        fi
    fi

    # If we get here, repair failed — restore the pre-repair copy.
    log_error "Database repair failed. Restoring original..."
    if sudo mv "$backup_file" "$db_file"; then
        log_success "Original database restored"
    else
        log_error "Failed to restore original database!"
    fi

    sudo rm -rf "$temp_dir"
    return 1
}
|
|
|
|
# WAL (Write-Ahead Logging) file handling

# Deal with SQLite WAL/SHM sidecar files for the Plex databases.
# Arguments: $1 - action: "backup" (copy existing sidecars into $2 and
#                 verify the copies) or "checkpoint" (fold pending WAL
#                 contents back into the main database)
#            $2 - destination directory (backup action only)
# Always returns 0; individual file problems are logged as warnings.
handle_wal_files() {
    local action="$1" # "backup" or "restore"
    local backup_path="$2"

    log_info "Handling WAL files: $action"

    # Define WAL files that might exist
    local wal_files=(
        "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db-wal"
        "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db-shm"
        "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db-wal"
        "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db-shm"
    )

    for wal_file in "${wal_files[@]}"; do
        local wal_basename=$(basename "$wal_file")

        case "$action" in
            "backup")
                if [ -f "$wal_file" ]; then
                    log_info "Found WAL/SHM file: $wal_basename"
                    local backup_file="${backup_path}/${wal_basename}"

                    if sudo cp "$wal_file" "$backup_file"; then
                        log_success "Backed up WAL/SHM file: $wal_basename"

                        # Verify backup
                        if verify_backup "$wal_file" "$backup_file"; then
                            log_success "Verified WAL/SHM backup: $wal_basename"
                        else
                            log_warning "WAL/SHM backup verification failed: $wal_basename"
                        fi
                    else
                        log_warning "Failed to backup WAL/SHM file: $wal_basename"
                    fi
                else
                    # Absence is expected after a clean checkpoint/shutdown.
                    log_info "WAL/SHM file not found (normal): $wal_basename"
                fi
                ;;
            "checkpoint")
                # Force WAL checkpoint to integrate changes into main database
                # NOTE(review): both the -wal and -shm entries map back to the
                # same .db here, so a checkpoint may run twice per database —
                # redundant but harmless.
                local db_file="${wal_file%.db-*}.db"
                if [ -f "$db_file" ] && [ -f "$wal_file" ]; then
                    log_info "Performing WAL checkpoint for: $(basename "$db_file")"
                    if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA wal_checkpoint(FULL);" 2>/dev/null; then
                        log_success "WAL checkpoint completed for: $(basename "$db_file")"
                    else
                        log_warning "WAL checkpoint failed for: $(basename "$db_file")"
                    fi
                fi
                ;;
        esac
    done
}
|
|
|
|
# Enhanced database integrity check with WAL handling

# Like check_database_integrity, but first folds any pending WAL file into
# the main database via "PRAGMA wal_checkpoint(FULL)" so the on-disk .db
# is complete before it is checked (and later backed up).
# Arguments: $1 - path to the .db file
# Returns:   0 when the integrity check reports "ok", 1 otherwise.
check_database_integrity_with_wal() {
    local db_file="$1"
    local db_name=$(basename "$db_file")

    log_message "Checking database integrity with WAL handling: $db_name"

    # Check if Plex SQLite exists
    if [ ! -f "$PLEX_SQLITE" ]; then
        log_error "Plex SQLite binary not found at: $PLEX_SQLITE"
        return 1
    fi

    # Make Plex SQLite executable if it isn't already
    sudo chmod +x "$PLEX_SQLITE" 2>/dev/null || true

    # Check if WAL file exists and handle it
    local wal_file="${db_file}-wal"
    if [ -f "$wal_file" ]; then
        log_info "WAL file found for $db_name, performing checkpoint..."
        if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA wal_checkpoint(FULL);" 2>/dev/null; then
            log_success "WAL checkpoint completed for $db_name"
        else
            # A failed checkpoint is not fatal — the integrity check below
            # still runs against whatever is in the main file.
            log_warning "WAL checkpoint failed for $db_name, proceeding with integrity check"
        fi
    fi

    # Run integrity check
    local integrity_result
    integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1)
    local check_exit_code=$?

    if [ $check_exit_code -ne 0 ]; then
        log_error "Failed to run integrity check on $db_name: $integrity_result"
        return 1
    fi

    # A healthy database prints exactly "ok" on a line by itself.
    if echo "$integrity_result" | grep -q "^ok$"; then
        log_success "Database integrity check passed: $db_name"
        return 0
    else
        log_warning "Database integrity issues detected in $db_name:"
        echo "$integrity_result" | while read -r line; do
            log_warning " $line"
        done
        return 1
    fi
}
|
|
|
|
# Parallel verification function

# Verify every backed-up Plex file against its source — in parallel (one
# background subshell per file) when PARALLEL_VERIFICATION is true, else
# sequentially.
# Arguments: $1 - directory containing the backup copies
# Returns:   the number of files that failed verification (0 = all passed).
verify_files_parallel() {
    local backup_dir="$1"
    local -a pids=()
    local temp_dir=$(mktemp -d)
    local verification_errors=0

    if [ "$PARALLEL_VERIFICATION" != true ]; then
        # Fall back to sequential verification
        for nickname in "${!PLEX_FILES[@]}"; do
            local src_file="${PLEX_FILES[$nickname]}"
            local dest_file="$backup_dir/$(basename "$src_file")"

            if [ -f "$dest_file" ]; then
                if ! verify_backup "$src_file" "$dest_file"; then
                    verification_errors=$((verification_errors + 1))
                fi
            fi
        done
        return $verification_errors
    fi

    log_info "Starting parallel verification in $backup_dir"

    # Start verification jobs in parallel. Each job writes its 0/1 result
    # to a per-nickname file in temp_dir, because a subshell cannot update
    # this function's variables directly.
    for nickname in "${!PLEX_FILES[@]}"; do
        local src_file="${PLEX_FILES[$nickname]}"
        local dest_file="$backup_dir/$(basename "$src_file")"

        if [ -f "$dest_file" ]; then
            (
                local result_file="$temp_dir/$nickname.result"
                if verify_backup "$src_file" "$dest_file"; then
                    echo "0" > "$result_file"
                else
                    echo "1" > "$result_file"
                fi
            ) &
            pids+=($!)
        fi
    done

    # Wait for all verification jobs to complete
    for pid in "${pids[@]}"; do
        wait "$pid"
    done

    # Collect results written by the background jobs.
    for nickname in "${!PLEX_FILES[@]}"; do
        local result_file="$temp_dir/$nickname.result"
        if [ -f "$result_file" ]; then
            local result=$(cat "$result_file")
            if [ "$result" != "0" ]; then
                verification_errors=$((verification_errors + 1))
            fi
        fi
    done

    # Cleanup
    rm -rf "$temp_dir"

    return $verification_errors
}
|
|
|
|
# Verify backup integrity

# Compare md5 checksums of a source file and its backup copy.
# Arguments: $1 - source file path, $2 - backup file path
# Returns:   0 when the checksums match (or the source is unreadable and
#            verification must be skipped), 1 on mismatch or failure.
verify_backup() {
    local src="$1"
    local dest="$2"

    log_message "Verifying backup integrity: $(basename "$src")"

    # Split declaration from assignment: the original's
    # `local x=$(cmd); local rc=$?` captured the exit status of `local`
    # (always 0), so checksum failures were never detected via rc.
    local src_checksum dest_checksum
    local src_result=0
    local dest_result=0
    src_checksum=$(calculate_checksum "$src") || src_result=$?
    dest_checksum=$(calculate_checksum "$dest") || dest_result=$?

    # Handle permission issues gracefully
    if [ "$src_checksum" == "PERMISSION_DENIED" ]; then
        log_warning "Cannot verify $(basename "$src") - permission denied on source file"
        log_warning "Skipping verification for this file"
        return 0 # Consider it successful since we can't verify
    fi

    if [ "$dest_checksum" == "PERMISSION_DENIED" ]; then
        log_error "Cannot verify $(basename "$dest") - permission denied on backup file"
        return 1
    fi

    if [ $src_result -ne 0 ] || [ $dest_result -ne 0 ]; then
        log_error "Failed to calculate checksums for verification"
        log_error "Source checksum result: $src_result, Dest checksum result: $dest_result"
        return 1
    fi

    if [ "$src_checksum" == "$dest_checksum" ]; then
        log_success "Backup verification passed: $(basename "$src")"
        log_info "Source checksum: $src_checksum"
        log_info "Backup checksum: $dest_checksum"
        return 0
    else
        log_error "Backup verification failed: $(basename "$src")"
        log_error "Source checksum: $src_checksum"
        log_error "Backup checksum: $dest_checksum"
        return 1
    fi
}
|
|
|
|
# Enhanced service management with better monitoring

# Stop or start plexmediaserver.service and poll systemd until it reports
# the requested state (15s budget for stop, 30s for start).
# Arguments: $1 - "stop" or "start"
# Globals:   SERVICE_STOP_TIME / SERVICE_START_TIME (written, epoch secs)
# Returns:   0 when the service reached the requested state in time;
#            1 on systemctl failure, timeout, or an invalid action.
manage_plex_service() {
    local action="$1"
    local operation_start
    operation_start=$(date +%s)

    log_message "Managing Plex service: $action"

    case "$action" in
        stop)
            # (Original re-tested `[ "$action" == "stop" ]` here; inside this
            # case arm it is always true, so the dead guard was removed.)
            SERVICE_STOP_TIME=$(date +%s)

            if sudo systemctl stop plexmediaserver.service; then
                log_success "Plex service stopped"
                # Wait for clean shutdown with progress indicator
                local wait_time=0
                local max_wait=15

                while [ $wait_time -lt $max_wait ]; do
                    if ! sudo systemctl is-active --quiet plexmediaserver.service; then
                        log_success "Plex service confirmed stopped (${wait_time}s)"
                        track_performance "service_stop" "$operation_start"
                        return 0
                    fi
                    sleep 1
                    wait_time=$((wait_time + 1))
                    echo -n "."
                done
                echo

                log_warning "Plex service may not have stopped cleanly after ${max_wait}s"
                return 1
            else
                log_error "Failed to stop Plex service"
                return 1
            fi
            ;;
        start)
            SERVICE_START_TIME=$(date +%s)

            if sudo systemctl start plexmediaserver.service; then
                log_success "Plex service start command issued"
                # Wait for service to be fully running with progress indicator
                local wait_time=0
                local max_wait=30

                while [ $wait_time -lt $max_wait ]; do
                    if sudo systemctl is-active --quiet plexmediaserver.service; then
                        log_success "Plex service confirmed running (${wait_time}s)"
                        track_performance "service_start" "$operation_start"
                        return 0
                    fi
                    sleep 1
                    wait_time=$((wait_time + 1))
                    echo -n "."
                done
                echo

                log_error "Plex service failed to start within ${max_wait}s"
                return 1
            else
                log_error "Failed to start Plex service"
                return 1
            fi
            ;;
        *)
            log_error "Invalid service action: $action"
            return 1
            ;;
    esac
}
|
|
|
|
# Check available disk space

# Verify the filesystem holding $1 has at least $2 megabytes free.
# Arguments: $1 - directory to check, $2 - required free space in MB
# Returns:   0 when enough space is available, 1 otherwise.
check_disk_space() {
    local target_dir="$1"
    local needed_mb="$2"
    local free_kb free_mb

    # df column 4 on the data row is the available space in KB.
    free_kb=$(df "$target_dir" | awk 'NR==2 {print $4}')
    free_mb=$((free_kb / 1024))

    if [ "$free_mb" -ge "$needed_mb" ]; then
        log_message "Disk space check passed. Available: ${free_mb}MB"
        return 0
    fi

    log_error "Insufficient disk space. Required: ${needed_mb}MB, Available: ${free_mb}MB"
    return 1
}
|
|
|
|
# Estimate backup size

# Sum the on-disk size (du) of every configured Plex file that exists and
# print the total in megabytes.
estimate_backup_size() {
    local total_kb=0
    local nickname path size_kb

    for nickname in "${!PLEX_FILES[@]}"; do
        path="${PLEX_FILES[$nickname]}"
        [ -f "$path" ] || continue
        size_kb=$(du -k "$path" 2>/dev/null | cut -f1)
        total_kb=$((total_kb + size_kb))
    done

    echo $((total_kb / 1024)) # Return size in MB
}
|
|
|
|
# Generate performance report

# Log the last 10 timing entries and per-operation averages from the JSON
# performance log. No-op when monitoring is disabled or the log is missing;
# jq failures are tolerated.
generate_performance_report() {
    if [ "$PERFORMANCE_MONITORING" != true ] || [ ! -f "$PERFORMANCE_LOG_FILE" ]; then
        return 0
    fi

    log_info "Performance Summary:"

    # Recent performance data (last 10 entries)
    jq -r '.[-10:] | .[] | " \(.operation): \(.duration_seconds)s (\(.timestamp))"' "$PERFORMANCE_LOG_FILE" 2>/dev/null || true

    # Average durations for the common operations; 0 means "no data",
    # which is simply not reported.
    local op label avg
    for op in backup verification service_stop service_start; do
        avg=$(jq "[.[] | select(.operation == \"$op\") | .duration_seconds] | if length > 0 then add/length else 0 end" "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
        if [ "$avg" != "0" ]; then
            case "$op" in
                backup) label="backup" ;;
                verification) label="verification" ;;
                service_stop) label="service stop" ;;
                service_start) label="service start" ;;
            esac
            log_info "Average ${label} time: ${avg}s"
        fi
    done
}
|
|
|
|
# Clean old backups

# Prune backup archives under BACKUP_ROOT: delete archives older than
# MAX_BACKUP_AGE_DAYS, trim to the MAX_BACKUPS_TO_KEEP most recent, and
# remove leftover 8-character dated directories from the legacy layout.
cleanup_old_backups() {
    log_message "Cleaning up old backups..."

    # Remove backups older than MAX_BACKUP_AGE_DAYS
    find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -mtime "+${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true

    # Keep only MAX_BACKUPS_TO_KEEP most recent backups
    local backup_count
    backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)

    if [ "$backup_count" -gt "$MAX_BACKUPS_TO_KEEP" ]; then
        local excess_count=$((backup_count - MAX_BACKUPS_TO_KEEP))
        log_message "Removing $excess_count old backup(s)..."

        # NUL-delimited pipeline so archive paths containing whitespace (or
        # newlines) are deleted correctly — the original word-split on
        # spaces in the `cut | xargs` stage.
        find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -printf '%T@ %p\0' | \
            sort -zn | head -zn "$excess_count" | cut -zd' ' -f2- | \
            xargs -0 -r rm -f --
    fi

    # Clean up any remaining dated directories from old backup structure
    find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -exec rm -rf {} + 2>/dev/null || true

    log_message "Backup cleanup completed"
}
|
|
|
|
# Database integrity check only

# Standalone mode for --check-integrity: stop Plex, checkpoint WAL files,
# run an integrity check on each configured *.db file, optionally repair
# (AUTO_REPAIR, or an interactive y/N prompt), restart Plex, then EXIT the
# whole script — 0 when every database is clean, 1 when issues remain.
check_integrity_only() {
    log_message "Starting database integrity check at $(date)"

    # Stop Plex service
    manage_plex_service stop

    # Handle WAL files first
    handle_wal_files "checkpoint"

    local db_integrity_issues=0
    local databases_checked=0

    for nickname in "${!PLEX_FILES[@]}"; do
        local file="${PLEX_FILES[$nickname]}"

        # Only check database files
        if [[ "$file" == *".db" ]] && [ -f "$file" ]; then
            databases_checked=$((databases_checked + 1))
            log_message "Checking integrity of $(basename "$file")..."

            if ! check_database_integrity_with_wal "$file"; then
                db_integrity_issues=$((db_integrity_issues + 1))
                log_warning "Database integrity issues found in $(basename "$file")"

                # Determine if we should attempt repair
                local should_repair=false

                if [ "$AUTO_REPAIR" = true ]; then
                    should_repair=true
                    log_message "Auto-repair enabled, attempting repair..."
                elif [ "$INTERACTIVE_MODE" = true ]; then
                    # Single-keystroke prompt; anything but y/Y skips repair.
                    read -p "Attempt to repair $(basename "$file")? [y/N]: " -n 1 -r
                    echo
                    if [[ $REPLY =~ ^[Yy]$ ]]; then
                        should_repair=true
                    fi
                else
                    log_warning "Non-interactive mode: skipping repair for $(basename "$file")"
                fi

                if [ "$should_repair" = true ]; then
                    if repair_database "$file"; then
                        log_success "Database repair successful for $(basename "$file")"
                        # Re-check integrity after repair
                        if check_database_integrity "$file"; then
                            log_success "Post-repair integrity check passed for $(basename "$file")"
                        else
                            log_warning "Post-repair integrity check still shows issues for $(basename "$file")"
                        fi
                    else
                        log_error "Database repair failed for $(basename "$file")"
                    fi
                fi
            else
                log_success "Database integrity check passed for $(basename "$file")"
            fi
        fi
    done

    # Start Plex service
    manage_plex_service start

    # Summary
    log_message "Integrity check completed at $(date)"
    log_message "Databases checked: $databases_checked"
    log_message "Databases with issues: $db_integrity_issues"

    # NOTE: exits the script here; callers never regain control.
    if [ "$db_integrity_issues" -gt 0 ]; then
        log_warning "Integrity check completed with issues found"
        exit 1
    else
        log_success "All database integrity checks passed"
        exit 0
    fi
}
|
|
|
|
# Main backup function
|
|
main() {
|
|
local overall_start=$(date +%s)
|
|
|
|
log_message "Starting enhanced Plex backup process at $(date)"
|
|
send_notification "Backup Started" "Plex backup process initiated" "info"
|
|
|
|
# Create necessary directories
|
|
mkdir -p "${BACKUP_ROOT}"
|
|
mkdir -p "${LOG_ROOT}"
|
|
|
|
# Initialize logs
|
|
initialize_logs
|
|
|
|
# Check if only doing integrity check
|
|
if [ "$INTEGRITY_CHECK_ONLY" = true ]; then
|
|
check_integrity_only
|
|
return $?
|
|
fi
|
|
|
|
# Estimate backup size
|
|
local estimated_size_mb=$(estimate_backup_size)
|
|
log_message "Estimated backup size: ${estimated_size_mb}MB"
|
|
|
|
# Check disk space (require 2x estimated size for safety)
|
|
local required_space_mb=$((estimated_size_mb * 2))
|
|
if ! check_disk_space "${BACKUP_ROOT}" "$required_space_mb"; then
|
|
log_error "Aborting backup due to insufficient disk space"
|
|
exit 1
|
|
fi
|
|
|
|
# Stop Plex service
|
|
manage_plex_service stop
|
|
|
|
local backup_errors=0
|
|
local files_backed_up=0
|
|
local BACKUP_PATH="${BACKUP_ROOT}"
|
|
|
|
# Ensure backup root directory exists
|
|
mkdir -p "$BACKUP_PATH"
|
|
|
|
# Handle WAL files and check database integrity before backup
|
|
log_message "Performing WAL checkpoint and checking database integrity before backup..."
|
|
handle_wal_files "checkpoint"
|
|
|
|
local db_integrity_issues=0
|
|
|
|
for nickname in "${!PLEX_FILES[@]}"; do
|
|
local file="${PLEX_FILES[$nickname]}"
|
|
|
|
# Only check database files
|
|
if [[ "$file" == *".db" ]] && [ -f "$file" ]; then
|
|
if ! check_database_integrity_with_wal "$file"; then
|
|
db_integrity_issues=$((db_integrity_issues + 1))
|
|
log_warning "Database integrity issues found in $(basename "$file")"
|
|
|
|
# Determine if we should attempt repair
|
|
local should_repair=false
|
|
|
|
if [ "$AUTO_REPAIR" = true ]; then
|
|
should_repair=true
|
|
log_message "Auto-repair enabled, attempting repair..."
|
|
elif [ "$INTERACTIVE_MODE" = true ]; then
|
|
read -p "Database $(basename "$file") has integrity issues. Attempt repair before backup? [y/N]: " -n 1 -r
|
|
echo
|
|
if [[ $REPLY =~ ^[Yy]$ ]]; then
|
|
should_repair=true
|
|
fi
|
|
else
|
|
log_warning "Non-interactive mode: backing up database with integrity issues"
|
|
fi
|
|
|
|
if [ "$should_repair" = true ]; then
|
|
if repair_database "$file"; then
|
|
log_success "Database repair successful for $(basename "$file")"
|
|
else
|
|
log_error "Database repair failed for $(basename "$file")"
|
|
backup_errors=$((backup_errors + 1))
|
|
fi
|
|
fi
|
|
fi
|
|
fi
|
|
done
|
|
|
|
# Handle WAL files backup
|
|
handle_wal_files "backup" "$BACKUP_PATH"
|
|
|
|
# Backup files - always perform full backup
|
|
local backup_start=$(date +%s)
|
|
for nickname in "${!PLEX_FILES[@]}"; do
|
|
local file="${PLEX_FILES[$nickname]}"
|
|
|
|
if [ -f "$file" ]; then
|
|
log_message "Backing up: $(basename "$file")"
|
|
|
|
# Create backup filename without timestamp (use original filename)
|
|
local backup_file="${BACKUP_PATH}/$(basename "$file")"
|
|
|
|
# Copy file
|
|
if sudo cp "$file" "$backup_file"; then
|
|
log_success "Copied: $(basename "$file")"
|
|
|
|
# Verify backup
|
|
if verify_backup "$file" "$backup_file"; then
|
|
log_success "Verified: $(basename "$file")"
|
|
files_backed_up=$((files_backed_up + 1))
|
|
else
|
|
log_error "Verification failed: $(basename "$file")"
|
|
backup_errors=$((backup_errors + 1))
|
|
# Remove failed backup
|
|
rm -f "$backup_file"
|
|
fi
|
|
else
|
|
log_error "Failed to copy: $(basename "$file")"
|
|
backup_errors=$((backup_errors + 1))
|
|
fi
|
|
else
|
|
log_warning "File not found: $file"
|
|
fi
|
|
done
|
|
|
|
# Start Plex service
|
|
manage_plex_service start
|
|
|
|
# Create archive if files were backed up
|
|
if [ "$files_backed_up" -gt 0 ]; then
|
|
log_message "Creating compressed archive..."
|
|
|
|
# Check backup root directory is writable
|
|
if [ ! -w "$BACKUP_ROOT" ]; then
|
|
log_error "Backup root directory is not writable: $BACKUP_ROOT"
|
|
backup_errors=$((backup_errors + 1))
|
|
else
|
|
local temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
|
|
local final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
|
|
|
|
log_info "Temporary archive: $temp_archive"
|
|
log_info "Final archive: $final_archive"
|
|
|
|
# Create archive in /tmp first, containing only the backed up files
|
|
local temp_dir="/tmp/plex-backup-staging-$(date '+%Y%m%d_%H%M%S')"
|
|
if ! mkdir -p "$temp_dir"; then
|
|
log_error "Failed to create staging directory: $temp_dir"
|
|
backup_errors=$((backup_errors + 1))
|
|
else
|
|
log_info "Created staging directory: $temp_dir"
|
|
|
|
# Copy backed up files to staging directory
|
|
local files_staged=0
|
|
for nickname in "${!PLEX_FILES[@]}"; do
|
|
local file="${PLEX_FILES[$nickname]}"
|
|
local backup_file="${BACKUP_PATH}/$(basename "$file")"
|
|
if [ -f "$backup_file" ]; then
|
|
if cp "$backup_file" "$temp_dir/"; then
|
|
files_staged=$((files_staged + 1))
|
|
log_info "Staged for archive: $(basename "$backup_file")"
|
|
else
|
|
log_warning "Failed to stage file: $(basename "$backup_file")"
|
|
fi
|
|
else
|
|
log_warning "Backup file not found for staging: $(basename "$backup_file")"
|
|
fi
|
|
done
|
|
|
|
# Check if any files were staged
|
|
if [ "$files_staged" -eq 0 ]; then
|
|
log_error "No files were staged for archive creation"
|
|
rm -rf "$temp_dir"
|
|
backup_errors=$((backup_errors + 1))
|
|
else
|
|
log_info "Staged $files_staged files for archive creation"
|
|
|
|
# Check disk space in /tmp
|
|
local temp_available_kb=$(df /tmp | awk 'NR==2 {print $4}')
|
|
local temp_available_mb=$((temp_available_kb / 1024))
|
|
local staging_size_mb=$(du -sm "$temp_dir" | cut -f1)
|
|
log_info "/tmp available space: ${temp_available_mb}MB, staging directory size: ${staging_size_mb}MB"
|
|
|
|
# Check if we have enough space (require 3x staging size for compression)
|
|
local required_space_mb=$((staging_size_mb * 3))
|
|
if [ "$temp_available_mb" -lt "$required_space_mb" ]; then
|
|
log_error "Insufficient space in /tmp for archive creation. Required: ${required_space_mb}MB, Available: ${temp_available_mb}MB"
|
|
rm -rf "$temp_dir"
|
|
backup_errors=$((backup_errors + 1))
|
|
else
|
|
# Create archive with detailed error logging
|
|
log_info "Creating archive: $(basename "$temp_archive")"
|
|
local tar_output
|
|
tar_output=$(tar -czf "$temp_archive" -C "$temp_dir" . 2>&1)
|
|
local tar_exit_code=$?
|
|
|
|
if [ $tar_exit_code -eq 0 ]; then
|
|
# Verify archive was actually created and has reasonable size
|
|
if [ -f "$temp_archive" ]; then
|
|
local archive_size_mb=$(du -sm "$temp_archive" | cut -f1)
|
|
log_success "Archive created successfully: $(basename "$temp_archive") (${archive_size_mb}MB)"
|
|
|
|
# Test archive integrity before moving
|
|
if tar -tzf "$temp_archive" >/dev/null 2>&1; then
|
|
log_success "Archive integrity verified"
|
|
|
|
# Move the completed archive to the backup root
|
|
if mv "$temp_archive" "$final_archive"; then
|
|
log_success "Archive moved to final location: $(basename "$final_archive")"
|
|
|
|
# Remove individual backup files and staging directory
|
|
rm -rf "$temp_dir"
|
|
for nickname in "${!PLEX_FILES[@]}"; do
|
|
local file="${PLEX_FILES[$nickname]}"
|
|
local backup_file="${BACKUP_PATH}/$(basename "$file")"
|
|
rm -f "$backup_file" "$backup_file.md5"
|
|
done
|
|
else
|
|
log_error "Failed to move archive to final location: $final_archive"
|
|
log_error "Temporary archive remains at: $temp_archive"
|
|
rm -rf "$temp_dir"
|
|
backup_errors=$((backup_errors + 1))
|
|
fi
|
|
else
|
|
log_error "Archive integrity check failed - archive may be corrupted"
|
|
log_error "Archive size: ${archive_size_mb}MB"
|
|
rm -f "$temp_archive"
|
|
rm -rf "$temp_dir"
|
|
backup_errors=$((backup_errors + 1))
|
|
fi
|
|
else
|
|
log_error "Archive file was not created despite tar success"
|
|
rm -rf "$temp_dir"
|
|
backup_errors=$((backup_errors + 1))
|
|
fi
|
|
else
|
|
log_error "Failed to create archive (tar exit code: $tar_exit_code)"
|
|
if [ -n "$tar_output" ]; then
|
|
log_error "Tar command output: $tar_output"
|
|
fi
|
|
|
|
# Additional diagnostic information
|
|
log_error "Staging directory contents:"
|
|
ls -la "$temp_dir" 2>&1 | while IFS= read -r line; do
|
|
log_error " $line"
|
|
done
|
|
|
|
local temp_usage=$(df -h /tmp | awk 'NR==2 {print "Used: " $3 "/" $2 " (" $5 ")"}')
|
|
log_error "Temp filesystem status: $temp_usage"
|
|
|
|
rm -rf "$temp_dir"
|
|
backup_errors=$((backup_errors + 1))
|
|
fi
|
|
fi
|
|
fi
|
|
fi
|
|
fi
|
|
|
|
# Send notification
|
|
send_notification "Backup Completed" "Successfully backed up $files_backed_up files" "success"
|
|
else
|
|
log_message "No files needed backup"
|
|
fi
|
|
|
|
# Cleanup old backups
|
|
cleanup_old_backups
|
|
|
|
# Track overall backup performance
|
|
if [ "$files_backed_up" -gt 0 ]; then
|
|
track_performance "full_backup" "$backup_start"
|
|
fi
|
|
track_performance "total_script" "$overall_start"
|
|
|
|
# Generate performance report
|
|
generate_performance_report
|
|
|
|
# Final summary
|
|
local total_time=$(($(date +%s) - overall_start))
|
|
log_message "Backup process completed at $(date)"
|
|
log_message "Total execution time: ${total_time}s"
|
|
log_message "Files backed up: $files_backed_up"
|
|
log_message "Errors encountered: $backup_errors"
|
|
|
|
if [ "$backup_errors" -gt 0 ]; then
|
|
log_error "Backup completed with errors"
|
|
send_notification "Backup Error" "Backup completed with $backup_errors errors" "error"
|
|
exit 1
|
|
else
|
|
log_success "Enhanced backup completed successfully"
|
|
send_notification "Backup Success" "All $files_backed_up files backed up successfully in ${total_time}s" "success"
|
|
fi
|
|
}
|
|
|
|
# Trap to ensure Plex is restarted on script exit.
# INT/TERM are trapped explicitly so an interrupted run (Ctrl-C, kill)
# still restarts the service via the EXIT trap and exits with the
# conventional 128+signal status (130 = SIGINT, 143 = SIGTERM) —
# relying on the EXIT trap alone for signal deaths is shell-dependent.
trap 'manage_plex_service start' EXIT
trap 'exit 130' INT
trap 'exit 143' TERM

# Run main function
main "$@"