mirror of
https://github.com/acedanger/shell.git
synced 2025-12-06 01:10:12 -08:00
1080 lines · 35 KiB · Bash · Executable File
#!/bin/bash
#
# Media services backup script: copies configuration/backup artifacts out of
# the Sonarr/Radarr/Prowlarr/Audiobookshelf/Tautulli/SABnzbd/Jellyseerr
# containers onto the NAS, with optional verification, performance metrics
# and webhook notifications.

set -e

# Load the unified backup metrics library (optional; metrics are disabled
# gracefully when the library is absent).
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
LIB_DIR="$SCRIPT_DIR/lib"

if [[ -f "$LIB_DIR/unified-backup-metrics.sh" ]]; then
    # shellcheck source=lib/unified-backup-metrics.sh
    source "$LIB_DIR/unified-backup-metrics.sh"
    METRICS_ENABLED=true
else
    echo "Warning: Unified backup metrics library not found at $LIB_DIR/unified-backup-metrics.sh"
    METRICS_ENABLED=false
fi

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Configuration
MAX_BACKUP_AGE_DAYS=30   # age-based retention window
MAX_BACKUPS_TO_KEEP=10   # count-based retention per service
BACKUP_ROOT="/mnt/share/media/backups"
LOG_ROOT="/mnt/share/media/backups/logs"
# BUG FIX: SCRIPT_DIR was assigned twice (identically); the redundant second
# assignment has been removed.
JSON_LOG_FILE="${SCRIPT_DIR}/logs/media-backup.json"
PERFORMANCE_LOG_FILE="${SCRIPT_DIR}/logs/media-backup-performance.json"

# Script options (overridable via the command-line flags parsed below)
PARALLEL_BACKUPS=true
VERIFY_BACKUPS=true
PERFORMANCE_MONITORING=true
WEBHOOK_URL="https://notify.peterwood.rocks/lab"
INTERACTIVE_MODE=false
DRY_RUN=false
|
|
|
|
# Show help function
|
|
# Print the CLI usage text for this script to stdout.
# The heredoc expands $0 so the invoked script name appears in the examples.
show_help() {
    cat <<EOF
Media Services Backup Script

Usage: $0 [OPTIONS]

OPTIONS:
    --dry-run           Show what would be backed up without actually doing it
    --no-verify         Skip backup verification
    --sequential        Run backups sequentially instead of in parallel
    --interactive       Ask for confirmation before each backup
    --webhook URL       Custom webhook URL for notifications
    -h, --help          Show this help message

EXAMPLES:
    $0                  # Run full backup with default settings
    $0 --dry-run        # Preview what would be backed up
    $0 --sequential     # Run backups one at a time
    $0 --no-verify      # Skip verification for faster backup

SERVICES BACKED UP:
    - Sonarr (TV Shows)
    - Radarr (Movies)
    - Prowlarr (Indexers)
    - Audiobookshelf (Audiobooks)
    - Tautulli (Plex Statistics)
    - SABnzbd (Downloads)
    - Jellyseerr (Requests)

EOF
}
|
|
|
|
# Parse command line arguments
|
|
# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --dry-run)
            # Preview mode: report what would be copied without running docker cp
            DRY_RUN=true
            shift
            ;;
        --no-verify)
            # Skip post-copy checksum/content verification
            VERIFY_BACKUPS=false
            shift
            ;;
        --sequential)
            # Run service backups one at a time instead of as background jobs
            PARALLEL_BACKUPS=false
            shift
            ;;
        --interactive)
            # Prompt y/N before each individual service backup
            INTERACTIVE_MODE=true
            shift
            ;;
        --webhook)
            # Next argument is the notification endpoint URL
            WEBHOOK_URL="$2"
            shift 2
            ;;
        -h|--help)
            show_help
            exit 0
            ;;
        *)
            # Unknown flag: show usage and fail
            echo "Unknown option: $1"
            show_help
            exit 1
            ;;
    esac
done
|
|
|
|
# Create necessary directories
|
|
# Create necessary directories
mkdir -p "${SCRIPT_DIR}/logs"
mkdir -p "${BACKUP_ROOT}"/{sonarr,radarr,prowlarr,audiobookshelf,tautulli,sabnzbd,jellyseerr}

# Log files (one timestamped .log and .md per run)
# NOTE(review): the two $(date ...) calls run at slightly different moments
# and could straddle a second boundary, yielding mismatched file names.
LOG_FILE="${LOG_ROOT}/media-backup-$(date +%Y%m%d_%H%M%S).log"
MARKDOWN_LOG="${LOG_ROOT}/media-backup-$(date +%Y%m%d_%H%M%S).md"

# Define media services and their backup configurations.
# Key: service identifier; value: source path *inside* that service's container.
declare -A MEDIA_SERVICES=(
    ["sonarr"]="/config/Backups/scheduled"
    ["radarr"]="/config/Backups/scheduled"
    ["prowlarr"]="/config/Backups/scheduled"
    ["audiobookshelf"]="/metadata/backups"
    ["tautulli"]="/config/backups"
    ["sabnzbd"]="/config/sabnzbd.ini"
    ["jellyseerr_db"]="/config/db/db.sqlite3"
    ["jellyseerr_settings"]="/config/settings.json"
)

# Service-specific backup destinations on the NAS.
# Directory-valued entries end with '/'; file-valued entries embed a
# timestamp so repeated runs do not overwrite each other.
declare -A BACKUP_DESTINATIONS=(
    ["sonarr"]="${BACKUP_ROOT}/sonarr/"
    ["radarr"]="${BACKUP_ROOT}/radarr/"
    ["prowlarr"]="${BACKUP_ROOT}/prowlarr/"
    ["audiobookshelf"]="${BACKUP_ROOT}/audiobookshelf/"
    ["tautulli"]="${BACKUP_ROOT}/tautulli/"
    ["sabnzbd"]="${BACKUP_ROOT}/sabnzbd/sabnzbd_$(date +%Y%m%d).ini"
    ["jellyseerr_db"]="${BACKUP_ROOT}/jellyseerr/jellyseerr_db_$(date +%Y%m%d_%H%M%S).sqlite3"
    ["jellyseerr_settings"]="${BACKUP_ROOT}/jellyseerr/settings_$(date +%Y%m%d_%H%M%S).json"
)
|
|
|
|
# Logging functions
|
|
# Print a timestamped line to stdout (cyan) and append a plain copy to
# LOG_FILE. File-write failures are ignored so logging never aborts a run.
log_message() {
    local msg="$1"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${CYAN}[${ts}]${NC} ${msg}"
    echo "[${ts}] $msg" >> "${LOG_FILE}" 2>/dev/null || true
}
|
|
|
|
# Print a timestamped ERROR line to stderr (red) and append a plain copy to
# LOG_FILE. File-write failures are ignored so logging never aborts a run.
log_error() {
    local msg="$1"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${RED}[${ts}] ERROR:${NC} ${msg}" >&2
    echo "[${ts}] ERROR: $msg" >> "${LOG_FILE}" 2>/dev/null || true
}
|
|
|
|
# Print a timestamped SUCCESS line to stdout (green) and append a plain copy
# to LOG_FILE. File-write failures are ignored.
log_success() {
    local msg="$1"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${GREEN}[${ts}] SUCCESS:${NC} ${msg}"
    echo "[${ts}] SUCCESS: $msg" >> "${LOG_FILE}" 2>/dev/null || true
}
|
|
|
|
# Print a timestamped WARNING line to stdout (yellow) and append a plain
# copy to LOG_FILE. File-write failures are ignored.
log_warning() {
    local msg="$1"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${YELLOW}[${ts}] WARNING:${NC} ${msg}"
    echo "[${ts}] WARNING: $msg" >> "${LOG_FILE}" 2>/dev/null || true
}
|
|
|
|
# Print a timestamped INFO line to stdout (blue) and append a plain copy to
# LOG_FILE. File-write failures are ignored.
log_info() {
    local msg="$1"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${BLUE}[${ts}] INFO:${NC} ${msg}"
    echo "[${ts}] INFO: $msg" >> "${LOG_FILE}" 2>/dev/null || true
}
|
|
|
|
# Check if NAS mount is accessible
|
|
# Check if NAS mount is accessible.
# Verifies /mnt/share/media is an active mount point (mountpoint -q) and
# that BACKUP_ROOT is writable. Returns 0 when both hold; returns 1 with
# operator hints otherwise.
check_nas_mount() {
    local mount_point="/mnt/share/media"

    # mountpoint -q exits non-zero when the path is not an active mount
    if ! mountpoint -q "$mount_point"; then
        log_error "NAS not mounted at $mount_point"
        log_info "Please mount the NAS first with: sudo mount $mount_point"
        log_info "Or check the mounting setup guide in docs/nas-mount-setup-guide.md"
        return 1
    fi

    # Test write access to backup directory
    if [ ! -w "$BACKUP_ROOT" ]; then
        log_error "No write access to backup directory: $BACKUP_ROOT"
        log_info "Check NAS mount permissions and credentials"
        return 1
    fi

    log_success "NAS mount check passed: $mount_point is accessible"
    return 0
}
|
|
|
|
# Performance tracking functions
|
|
# Record an operation's duration in the JSON performance log.
#
# $1 - operation name
# $2 - start epoch seconds
# $3 - (optional) end epoch seconds; defaults to now
# No-op unless PERFORMANCE_MONITORING=true. Appends one entry to
# PERFORMANCE_LOG_FILE under a simple noclobber lock (requires jq).
track_performance() {
    if [ "$PERFORMANCE_MONITORING" != true ]; then
        return 0
    fi

    local operation="$1"
    local start_time="$2"
    local end_time="${3:-$(date +%s)}"
    local duration=$((end_time - start_time))

    # Initialize performance log if it doesn't exist
    if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then
        echo "[]" > "$PERFORMANCE_LOG_FILE"
    fi

    if command -v jq > /dev/null 2>&1; then
        # BUG FIX: the entry was previously built with jq *before* checking
        # that jq exists, which aborted the whole script under `set -e` on
        # hosts without jq. Build it only inside the guard.
        local entry
        entry=$(jq -n \
            --arg timestamp "$(date -Iseconds)" \
            --arg operation "$operation" \
            --arg duration "$duration" \
            --arg hostname "$(hostname)" \
            '{
                timestamp: $timestamp,
                operation: $operation,
                duration: ($duration | tonumber),
                hostname: $hostname
            }')

        # Add performance entry with lock protection (noclobber lock file)
        local lock_file="${PERFORMANCE_LOG_FILE}.lock"
        local max_wait=10
        local wait_count=0

        while [ $wait_count -lt $max_wait ]; do
            if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
                break
            fi
            sleep 0.1
            # BUG FIX: ((wait_count++)) evaluates to 0 on the first increment
            # and returns exit status 1, which terminates the script under
            # `set -e`; use arithmetic assignment instead.
            wait_count=$((wait_count + 1))
        done

        if [ $wait_count -lt $max_wait ]; then
            if jq --argjson entry "$entry" '. += [$entry]' "$PERFORMANCE_LOG_FILE" > "${PERFORMANCE_LOG_FILE}.tmp" 2>/dev/null; then
                mv "${PERFORMANCE_LOG_FILE}.tmp" "$PERFORMANCE_LOG_FILE"
            else
                rm -f "${PERFORMANCE_LOG_FILE}.tmp"
            fi
            rm -f "$lock_file"
        fi
    fi

    log_info "Performance: $operation completed in ${duration}s"
}
|
|
|
|
# Initialize JSON log file
|
|
# Initialize JSON log file.
# (Re)creates JSON_LOG_FILE as an empty JSON object when the file is missing
# or fails `jq empty` validation (e.g. truncated by a crashed earlier run).
initialize_json_log() {
    if [ ! -f "${JSON_LOG_FILE}" ] || ! jq empty "${JSON_LOG_FILE}" 2>/dev/null; then
        echo "{}" > "${JSON_LOG_FILE}"
        log_message "Initialized JSON log file"
    fi
}
|
|
|
|
# Enhanced function to log file details with markdown formatting
|
|
# Record one backup's outcome in the markdown and JSON logs.
#
# $1 service, $2 source (container:path), $3 destination path, $4 status.
# For successful backups, computes the destination size and (when
# verification is enabled) an aggregate md5-of-md5s checksum. The markdown
# section is appended under a noclobber lock so parallel jobs don't
# interleave; the JSON record is mirrored via update_backup_log when jq
# is available.
log_file_details() {
    local service="$1"
    local src="$2"
    local dest="$3"
    local status="$4"
    local size=""
    local checksum=""

    # Calculate size (and aggregate checksum) only if backup was successful
    if [ "$status" == "SUCCESS" ] && [ -e "$dest" ]; then
        size=$(du -sh "$dest" 2>/dev/null | cut -f1 || echo "Unknown")
        if [ "$VERIFY_BACKUPS" == true ]; then
            # checksum-of-checksums over every file under $dest
            checksum=$(find "$dest" -type f -exec md5sum {} \; 2>/dev/null | md5sum | cut -d' ' -f1 || echo "N/A")
        fi
    else
        size="N/A"
        checksum="N/A"
    fi

    # Use a lock file for markdown log to prevent race conditions
    local markdown_lock="${MARKDOWN_LOG}.lock"
    local max_wait=30
    local wait_count=0

    while [ $wait_count -lt $max_wait ]; do
        if (set -C; echo $$ > "$markdown_lock") 2>/dev/null; then
            break
        fi
        sleep 0.1
        # BUG FIX: ((wait_count++)) returns status 1 when the pre-increment
        # value is 0, which kills the script under `set -e`.
        wait_count=$((wait_count + 1))
    done

    if [ $wait_count -lt $max_wait ]; then
        # Log to markdown file safely.
        # BUG FIX: source/destination were emitted as the garbled literal
        # "\$($src\)"; emit markdown inline code (backticks) instead.
        {
            echo "## $service Backup"
            echo "- **Status**: $status"
            echo "- **Source**: \`$src\`"
            echo "- **Destination**: \`$dest\`"
            echo "- **Size**: $size"
            echo "- **Checksum**: $checksum"
            echo "- **Timestamp**: $(date '+%Y-%m-%d %H:%M:%S')"
            echo ""
        } >> "$MARKDOWN_LOG"

        rm -f "$markdown_lock"
    else
        log_warning "Could not acquire markdown log lock for $service"
    fi

    # Log to JSON
    if command -v jq > /dev/null 2>&1; then
        update_backup_log "$service" "$src" "$dest" "$status" "$size" "$checksum"
    fi
}
|
|
|
|
# Update backup log in JSON format
|
|
# Upsert one service's latest backup record into the JSON log.
#
# $1 service, $2 source, $3 destination, $4 status, $5 size, $6 checksum.
# No-op when jq is unavailable. Uses a noclobber lock file so parallel
# backup jobs can update JSON_LOG_FILE safely; each service keys one entry
# (newer runs overwrite older ones).
update_backup_log() {
    local service="$1"
    local src="$2"
    local dest="$3"
    local status="$4"
    local size="$5"
    local checksum="$6"
    local timestamp
    timestamp=$(date -Iseconds)

    if ! command -v jq > /dev/null 2>&1; then
        return 0
    fi

    # Use a lock file for parallel safety
    local lock_file="${JSON_LOG_FILE}.lock"
    local max_wait=30
    local wait_count=0

    while [ $wait_count -lt $max_wait ]; do
        if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
            break
        fi
        sleep 0.1
        # BUG FIX: ((wait_count++)) returns status 1 on the first increment
        # (value 0) and would abort the script under `set -e`.
        wait_count=$((wait_count + 1))
    done

    if [ $wait_count -ge $max_wait ]; then
        log_warning "Could not acquire lock for JSON log update"
        return 1
    fi

    # Create entry for this backup
    local entry
    entry=$(jq -n \
        --arg service "$service" \
        --arg src "$src" \
        --arg dest "$dest" \
        --arg status "$status" \
        --arg size "$size" \
        --arg checksum "$checksum" \
        --arg timestamp "$timestamp" \
        '{
            service: $service,
            source: $src,
            destination: $dest,
            status: $status,
            size: $size,
            checksum: $checksum,
            timestamp: $timestamp
        }')

    # Update JSON log safely (write to a temp file, then atomic rename)
    if jq --argjson entry "$entry" --arg service "$service" \
        '.[$service] = $entry' "$JSON_LOG_FILE" > "${JSON_LOG_FILE}.tmp" 2>/dev/null; then
        mv "${JSON_LOG_FILE}.tmp" "$JSON_LOG_FILE"
    else
        rm -f "${JSON_LOG_FILE}.tmp"
    fi

    # Remove lock file
    rm -f "$lock_file"
}
|
|
|
|
# Check if Docker container is running
|
|
# Check whether a Docker container is currently running.
#
# $1 - container name. Returns 0 when found in `docker ps`, 1 otherwise
# (including when the Docker daemon is unreachable).
check_container_running() {
    local container="$1"

    # Plain '{{.Names}}' (no "table") avoids the header row, and grep -Fx
    # matches the whole line literally so names containing regex
    # metacharacters (e.g. dots) can't produce false positives.
    if ! docker ps --format '{{.Names}}' 2>/dev/null | grep -Fxq "$container"; then
        log_warning "Container '$container' is not running"
        return 1
    fi

    return 0
}
|
|
|
|
# Verify backup integrity
|
|
# Verify backup integrity.
#
# $1 source container, $2 source path inside the container, $3 destination
# path on the host. Honors VERIFY_BACKUPS (returns 0 immediately when off).
# Single config files (.ini/.json): compares container-side vs host-side
# md5sums. Directories: only confirms at least one file landed. Cases that
# cannot be verified return 0 with a warning rather than failing the backup.
verify_backup() {
    local src_container="$1"
    local src_path="$2"
    local dest_path="$3"

    if [ "$VERIFY_BACKUPS" != true ]; then
        return 0
    fi

    log_info "Verifying backup integrity for $src_container:$src_path"

    # For files, compare checksums
    if [[ "$src_path" == *.ini ]] || [[ "$src_path" == *.json ]]; then
        local src_checksum
        local dest_checksum
        # Errors (container gone, file missing) collapse to "" so the
        # comparison below fails cleanly instead of aborting under set -e.
        src_checksum=$(docker exec "$src_container" md5sum "$src_path" 2>/dev/null | cut -d' ' -f1 || echo "")
        dest_checksum=$(md5sum "$dest_path" 2>/dev/null | cut -d' ' -f1 || echo "")

        if [ -n "$src_checksum" ] && [ -n "$dest_checksum" ] && [ "$src_checksum" == "$dest_checksum" ]; then
            log_success "Backup verification passed for $src_container:$src_path"
            return 0
        else
            log_error "Backup verification failed for $src_container:$src_path"
            return 1
        fi
    fi

    # For directories, check if they exist and have content
    if [ -d "$dest_path" ]; then
        local file_count
        file_count=$(find "$dest_path" -type f 2>/dev/null | wc -l)
        if [ "$file_count" -gt 0 ]; then
            log_success "Backup verification passed for $src_container:$src_path ($file_count files)"
            return 0
        else
            log_error "Backup verification failed: no files found in $dest_path"
            return 1
        fi
    fi

    # Neither a known file type nor an existing directory: can't verify.
    log_warning "Unable to verify backup for $src_container:$src_path"
    return 0
}
|
|
|
|
# Backup a single service
|
|
# Backup a single service.
#
# $1 - service key into MEDIA_SERVICES / BACKUP_DESTINATIONS (the container
#      name matches the key, except jellyseerr_* which map to "jellyseerr").
# Honors DRY_RUN and INTERACTIVE_MODE. Returns 0 on success or skip,
# 1 on failure.
backup_service() {
    local service="$1"
    local container="$1"
    local backup_start_time
    backup_start_time=$(date +%s)

    log_message "Starting backup for service: $service"

    # Jellyseerr items need the specialized SQLite-aware backup path.
    # BUG FIX: the original used `return $(backup_jellyseerr_service ...)`,
    # which captured the function's logged stdout as return's argument
    # instead of propagating its exit status (and swallowed its log output).
    if [[ "$service" == jellyseerr_* ]]; then
        backup_jellyseerr_service "$service"
        return $?
    fi

    # Defensive mapping; unreachable in practice because jellyseerr_* keys
    # already returned above.
    case "$service" in
        jellyseerr_db|jellyseerr_settings)
            container="jellyseerr"
            ;;
    esac

    # Check if container is running
    if ! check_container_running "$container"; then
        log_file_details "$service" "${container}:${MEDIA_SERVICES[$service]}" "${BACKUP_DESTINATIONS[$service]}" "FAILED - Container not running"
        return 1
    fi

    local src_path="${MEDIA_SERVICES[$service]}"
    local dest_path="${BACKUP_DESTINATIONS[$service]}"

    # Perform the backup
    if [ "$DRY_RUN" == true ]; then
        log_info "DRY RUN: Would backup $container:$src_path to $dest_path"
        log_file_details "$service" "$container:$src_path" "$dest_path" "DRY RUN"
        return 0
    fi

    if [ "$INTERACTIVE_MODE" == true ]; then
        echo -n "Backup $service? (y/N): "
        read -r response
        if [[ ! "$response" =~ ^[Yy]$ ]]; then
            log_info "Skipping $service backup (user choice)"
            return 0
        fi
    fi

    # Execute docker cp command
    local docker_cmd="docker cp $container:$src_path $dest_path"
    log_info "Executing: $docker_cmd"

    # BUG FIX: `if $docker_cmd | tee ...` tested tee's exit status, masking
    # a failed `docker cp`. Capture docker's own status via PIPESTATUS.
    $docker_cmd 2>&1 | tee -a "$LOG_FILE"
    local cp_status=${PIPESTATUS[0]}

    if [ "$cp_status" -eq 0 ]; then
        log_success "Backup completed for $service"

        # File-level metrics tracking (success)
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            local file_size checksum
            if [ -f "$dest_path" ]; then
                file_size=$(stat -c%s "$dest_path" 2>/dev/null || echo "0")
                checksum=$(md5sum "$dest_path" 2>/dev/null | cut -d' ' -f1 || echo "")
                metrics_add_file "$dest_path" "success" "$file_size" "$checksum"
            elif [ -d "$dest_path" ]; then
                # For directories, sum file sizes and add one entry for the directory
                file_size=$(find "$dest_path" -type f -exec stat -c%s {} + 2>/dev/null | awk '{s+=$1} END {print s}' || echo "0")
                metrics_add_file "$dest_path" "success" "$file_size"
            fi
        fi

        # Verify the backup
        if verify_backup "$container" "$src_path" "$dest_path"; then
            log_file_details "$service" "$container:$src_path" "$dest_path" "SUCCESS"
            track_performance "backup_${service}" "$backup_start_time"
            return 0
        else
            log_file_details "$service" "$container:$src_path" "$dest_path" "VERIFICATION_FAILED"
            # File-level metrics tracking (verification failed)
            if [[ "$METRICS_ENABLED" == "true" ]]; then
                local file_size
                if [ -f "$dest_path" ]; then
                    file_size=$(stat -c%s "$dest_path" 2>/dev/null || echo "0")
                    metrics_add_file "$dest_path" "failed" "$file_size" "" "Verification failed"
                elif [ -d "$dest_path" ]; then
                    file_size=$(find "$dest_path" -type f -exec stat -c%s {} + 2>/dev/null | awk '{s+=$1} END {print s}' || echo "0")
                    metrics_add_file "$dest_path" "failed" "$file_size" "" "Verification failed"
                fi
            fi
            return 1
        fi
    else
        log_error "Backup failed for $service"
        log_file_details "$service" "$container:$src_path" "$dest_path" "FAILED"
        # File-level metrics tracking (backup failed)
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            local file_size
            if [ -f "$dest_path" ]; then
                file_size=$(stat -c%s "$dest_path" 2>/dev/null || echo "0")
                metrics_add_file "$dest_path" "failed" "$file_size" "" "Backup failed"
            elif [ -d "$dest_path" ]; then
                file_size=$(find "$dest_path" -type f -exec stat -c%s {} + 2>/dev/null | awk '{s+=$1} END {print s}' || echo "0")
                metrics_add_file "$dest_path" "failed" "$file_size" "" "Backup failed"
            fi
        fi
        return 1
    fi
}
|
|
|
|
# Specialized Jellyseerr backup function using SQLite CLI method
|
|
# Specialized Jellyseerr backup function using SQLite CLI method.
#
# $1 - "jellyseerr_db" (live-safe SQLite .backup inside the container, then
#      docker cp to the host) or "jellyseerr_settings" (plain docker cp of
#      settings.json). Honors DRY_RUN and INTERACTIVE_MODE.
# Returns 0 on success or skip, 1 on failure.
backup_jellyseerr_service() {
    local service="$1"
    local container="jellyseerr"
    local backup_start_time
    backup_start_time=$(date +%s)

    log_message "Starting specialized Jellyseerr backup for: $service"

    # Check if container is running
    if ! check_container_running "$container"; then
        log_file_details "$service" "${container}:${MEDIA_SERVICES[$service]}" "${BACKUP_DESTINATIONS[$service]}" "FAILED - Container not running"
        return 1
    fi

    local src_path="${MEDIA_SERVICES[$service]}"
    local dest_path="${BACKUP_DESTINATIONS[$service]}"

    # Create destination directory (dest_path is a file path for both kinds)
    mkdir -p "$(dirname "$dest_path")"

    # Perform the backup
    if [ "$DRY_RUN" == true ]; then
        log_info "DRY RUN: Would backup $container:$src_path to $dest_path"
        log_file_details "$service" "$container:$src_path" "$dest_path" "DRY RUN"
        return 0
    fi

    if [ "$INTERACTIVE_MODE" == true ]; then
        echo -n "Backup $service? (y/N): "
        read -r response
        if [[ ! "$response" =~ ^[Yy]$ ]]; then
            log_info "Skipping $service backup (user choice)"
            return 0
        fi
    fi

    # Handle different backup methods based on service type
    local backup_success=false

    if [[ "$service" == "jellyseerr_db" ]]; then
        # Use SQLite CLI method for database backup (recommended by Jellyseerr docs)
        log_info "Using SQLite CLI method for database backup"

        # Create a temporary backup file path inside the container
        local temp_backup="/tmp/jellyseerr_db_$(date +%Y%m%d_%H%M%S).sqlite3"

        # Execute SQLite backup command inside the container.
        # eval is needed so the quoted ".backup '...'" argument survives;
        # all interpolated values are script-controlled, not user input.
        local sqlite_cmd="docker exec $container sqlite3 $src_path \".backup '$temp_backup'\""
        log_info "Executing: $sqlite_cmd"

        # NOTE(review): `cmd | tee` tests tee's exit status, so a failed
        # sqlite3/docker invocation can be masked here — confirm and
        # consider checking PIPESTATUS[0] instead.
        if eval "$sqlite_cmd" 2>&1 | tee -a "$LOG_FILE"; then
            # Copy the backup file from container to host
            local copy_cmd="docker cp $container:$temp_backup $dest_path"
            log_info "Executing: $copy_cmd"

            if $copy_cmd 2>&1 | tee -a "$LOG_FILE"; then
                # Clean up temporary file in container
                docker exec "$container" rm -f "$temp_backup" 2>/dev/null || true
                backup_success=true
            else
                log_error "Failed to copy database backup from container"
                # Clean up temporary file in container
                docker exec "$container" rm -f "$temp_backup" 2>/dev/null || true
            fi
        else
            log_error "SQLite backup command failed"
        fi

    elif [[ "$service" == "jellyseerr_settings" ]]; then
        # Standard file copy for settings
        local docker_cmd="docker cp $container:$src_path $dest_path"
        log_info "Executing: $docker_cmd"

        if $docker_cmd 2>&1 | tee -a "$LOG_FILE"; then
            backup_success=true
        else
            log_error "Settings backup failed"
        fi
    else
        log_error "Unknown Jellyseerr service type: $service"
        return 1
    fi

    if [ "$backup_success" == true ]; then
        log_success "Backup completed for $service"

        # File-level metrics tracking (success)
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            local file_size checksum
            if [ -f "$dest_path" ]; then
                file_size=$(stat -c%s "$dest_path" 2>/dev/null || echo "0")
                checksum=$(md5sum "$dest_path" 2>/dev/null | cut -d' ' -f1 || echo "")
                metrics_add_file "$dest_path" "success" "$file_size" "$checksum"
            fi
        fi

        # Verify the backup
        if verify_jellyseerr_backup "$container" "$src_path" "$dest_path" "$service"; then
            log_file_details "$service" "$container:$src_path" "$dest_path" "SUCCESS"
            track_performance "backup_${service}" "$backup_start_time"
            return 0
        else
            log_file_details "$service" "$container:$src_path" "$dest_path" "VERIFICATION_FAILED"
            # File-level metrics tracking (verification failed)
            if [[ "$METRICS_ENABLED" == "true" ]]; then
                local file_size
                if [ -f "$dest_path" ]; then
                    file_size=$(stat -c%s "$dest_path" 2>/dev/null || echo "0")
                    metrics_add_file "$dest_path" "failed" "$file_size" "" "Verification failed"
                fi
            fi
            return 1
        fi
    else
        log_error "Backup failed for $service"
        log_file_details "$service" "$container:$src_path" "$dest_path" "FAILED"
        # File-level metrics tracking (backup failed)
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            local file_size
            if [ -f "$dest_path" ]; then
                file_size=$(stat -c%s "$dest_path" 2>/dev/null || echo "0")
                metrics_add_file "$dest_path" "failed" "$file_size" "" "Backup failed"
            fi
        fi
        return 1
    fi
}
|
|
|
|
# Specialized verification for Jellyseerr backups
|
|
# Verify a Jellyseerr backup artifact (database or settings file).
#
# $1 source container (unused directly, kept for signature parity),
# $2 source path, $3 destination file on the host, $4 service key.
# Returns 0 when verification passes or is disabled, 1 on failure.
verify_jellyseerr_backup() {
    local src_container="$1"
    local src_path="$2"
    local dest_path="$3"
    local service="$4"

    if [ "$VERIFY_BACKUPS" != true ]; then
        return 0
    fi

    log_info "Verifying Jellyseerr backup for $service"

    # The backup must exist and be non-empty before any deeper checks.
    if [ ! -f "$dest_path" ]; then
        log_error "Backup file not found: $dest_path"
        return 1
    fi

    local file_size
    file_size=$(stat -c%s "$dest_path" 2>/dev/null || echo "0")

    if [ "$file_size" -eq 0 ]; then
        log_error "Backup file is empty: $dest_path"
        return 1
    fi

    # Service-specific verification
    case "$service" in
        jellyseerr_db)
            if command -v file >/dev/null 2>&1; then
                if file "$dest_path" | grep -q "SQLite"; then
                    log_success "Database backup verification passed: Valid SQLite file ($file_size bytes)"
                    return 0
                fi
                log_error "Database backup verification failed: Not a valid SQLite file"
                return 1
            fi
            # No file(1) available: fall back to a minimum-size heuristic (>1KB)
            if [ "$file_size" -gt 1024 ]; then
                log_success "Database backup verification passed: File size check ($file_size bytes)"
                return 0
            fi
            log_error "Database backup verification failed: File too small ($file_size bytes)"
            return 1
            ;;
        jellyseerr_settings)
            if command -v jq >/dev/null 2>&1; then
                if jq empty "$dest_path" 2>/dev/null; then
                    log_success "Settings backup verification passed: Valid JSON file ($file_size bytes)"
                    return 0
                fi
                log_error "Settings backup verification failed: Invalid JSON file"
                return 1
            fi
            # No jq: cheap sanity check that the first byte looks like JSON
            if head -c 1 "$dest_path" | grep -q "[{\[]"; then
                log_success "Settings backup verification passed: JSON format check ($file_size bytes)"
                return 0
            fi
            log_error "Settings backup verification failed: Does not appear to be JSON"
            return 1
            ;;
    esac

    log_warning "Unable to verify backup for $service"
    return 0
}
|
|
|
|
# Clean up old backups based on age and count
|
|
# Clean up old backups based on age and count.
#
# Per service directory under BACKUP_ROOT:
#   1. age-based:   delete files/empty dirs older than MAX_BACKUP_AGE_DAYS
#   2. count-based: keep only the newest MAX_BACKUPS_TO_KEEP .ini/.json files
#      and backup_* directories (newest determined by reverse path sort,
#      which works because the names embed sortable timestamps)
# Old run logs in LOG_ROOT are pruned by age as well. All deletions are
# best-effort (errors suppressed).
cleanup_old_backups() {
    log_message "Cleaning up old backups..."

    for service_dir in "${BACKUP_ROOT}"/*; do
        if [ ! -d "$service_dir" ]; then
            continue
        fi

        local service
        service=$(basename "$service_dir")
        log_info "Cleaning up old backups for $service"

        # Remove backups older than MAX_BACKUP_AGE_DAYS
        find "$service_dir" -type f -mtime +"${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true
        find "$service_dir" -type d -empty -mtime +"${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true

        # Keep only the most recent MAX_BACKUPS_TO_KEEP backups.
        # BUG FIX: without parentheses, `-type f -name '*.ini' -o -name
        # '*.json'` parses as `(-type f -a -name '*.ini') -o -name '*.json'`,
        # so the -type f filter never applied to the .json arm. xargs -r
        # avoids invoking rm with no arguments when nothing matches.
        find "$service_dir" -type f \( -name "*.ini" -o -name "*.json" \) | sort -r | tail -n +$((MAX_BACKUPS_TO_KEEP + 1)) | xargs -r rm -f 2>/dev/null || true

        # Clean up old dated directories (for jellyseerr)
        find "$service_dir" -type d -name "backup_*" | sort -r | tail -n +$((MAX_BACKUPS_TO_KEEP + 1)) | xargs -r rm -rf 2>/dev/null || true
    done

    # Clean up old log files
    find "$LOG_ROOT" -name "media-backup-*.log" -mtime +"${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true
    find "$LOG_ROOT" -name "media-backup-*.md" -mtime +"${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true

    log_success "Cleanup completed"
}
|
|
|
|
# Check disk space
|
|
# Check free disk space under BACKUP_ROOT.
#
# $1 - (optional) minimum free space required in MB; defaults to 1000 (1GB),
#      preserving the original hard-coded behavior for existing callers.
# Returns 0 when enough space is available, 1 otherwise.
check_disk_space() {
    local required_space_mb="${1:-1000}" # Minimum 1GB free space by default

    local available_space_kb
    # df row 2, column 4 is the available-KB figure for the filesystem
    available_space_kb=$(df "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
    local available_space_mb=$((available_space_kb / 1024))

    if [ "$available_space_mb" -lt "$required_space_mb" ]; then
        log_error "Insufficient disk space. Available: ${available_space_mb}MB, Required: ${required_space_mb}MB"
        return 1
    fi

    log_info "Disk space check passed. Available: ${available_space_mb}MB"
    return 0
}
|
|
|
|
# Send enhanced notification
|
|
# Send an enhanced notification to the console log and (optionally) webhook.
#
# $1 title, $2 message, $3 status (success|error|warning|info; default info),
# $4 successful-backup count, $5 failed-backup count. The counts default to 0
# so the function is safe to call without statistics. The webhook POST is
# skipped during dry runs or when WEBHOOK_URL is empty.
send_notification() {
    local title="$1"
    local message="$2"
    local status="${3:-info}"
    local hostname
    hostname=$(hostname)
    local total_services=${#MEDIA_SERVICES[@]}
    # BUG FIX: default the counters — an empty $5 previously made
    # `[ "" -gt 0 ]` below raise an integer-expression error.
    local success_count="${4:-0}"
    local failed_count="${5:-0}"

    # Enhanced message with statistics
    local enhanced_message="$message\n\nServices: $total_services\nSuccessful: $success_count\nFailed: $failed_count\nHost: $hostname"

    # Console notification
    case "$status" in
        "success") log_success "$title: $enhanced_message" ;;
        "error") log_error "$title: $enhanced_message" ;;
        "warning") log_warning "$title: $enhanced_message" ;;
        *) log_info "$title: $enhanced_message" ;;
    esac

    # Webhook notification (best-effort; failure only logs a warning)
    if [ -n "$WEBHOOK_URL" ] && [ "$DRY_RUN" != true ]; then
        local tags="backup,media,${hostname}"
        [ "$failed_count" -gt 0 ] && tags="${tags},errors"

        curl -s \
            -H "tags:${tags}" \
            -d "$enhanced_message" \
            "$WEBHOOK_URL" 2>/dev/null || log_warning "Failed to send webhook notification"
    fi
}
|
|
|
|
# Generate backup summary report
|
|
# Generate the end-of-run backup summary report.
#
# $1 successes, $2 failures, $3 total runtime in seconds. Emits the summary
# via log_message and appends a matching markdown section to MARKDOWN_LOG.
generate_summary_report() {
    local ok_total="$1"
    local bad_total="$2"
    local elapsed="$3"

    log_message "=== BACKUP SUMMARY REPORT ==="
    log_message "Total Services: ${#MEDIA_SERVICES[@]}"
    log_message "Successful Backups: $ok_total"
    log_message "Failed Backups: $bad_total"
    log_message "Total Time: ${elapsed}s"
    log_message "Log File: $LOG_FILE"
    log_message "Markdown Report: $MARKDOWN_LOG"

    if [ "$PERFORMANCE_MONITORING" == true ]; then
        log_message "Performance Log: $PERFORMANCE_LOG_FILE"
    fi

    # Mirror the same figures into the markdown report
    {
        echo "# Media Backup Summary Report"
        echo "**Date**: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "**Host**: $(hostname)"
        echo "**Total Services**: ${#MEDIA_SERVICES[@]}"
        echo "**Successful**: $ok_total"
        echo "**Failed**: $bad_total"
        echo "**Duration**: ${elapsed}s"
        echo ""
    } >> "$MARKDOWN_LOG"
}
|
|
|
|
# Main backup execution function
|
|
# Main backup execution function.
#
# Orchestrates the run: pre-flight checks (NAS mount, disk space, Docker),
# per-service backups (parallel or sequential), retention cleanup, summary
# reporting, notifications, and metrics finalization. Exits 0 when every
# backup succeeded, 1 otherwise.
main() {
    local script_start_time
    script_start_time=$(date +%s)

    log_message "=== MEDIA SERVICES BACKUP STARTED ==="
    log_message "Host: $(hostname)"
    log_message "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
    log_message "Dry Run: $DRY_RUN"
    log_message "Parallel Mode: $PARALLEL_BACKUPS"
    log_message "Verify Backups: $VERIFY_BACKUPS"

    # Initialize metrics if enabled
    if [[ "$METRICS_ENABLED" == "true" ]]; then
        metrics_backup_start "media-services" "Media services backup (Sonarr, Radarr, etc.)" "$BACKUP_ROOT"
        metrics_status_update "initializing" "Preparing media services backup"
    fi

    # Initialize logging
    initialize_json_log

    # Initialize markdown log
    {
        echo "# Media Services Backup Report"
        echo "**Started**: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "**Host**: $(hostname)"
        echo ""
    } > "$MARKDOWN_LOG"

    # Update metrics for pre-flight checks
    if [[ "$METRICS_ENABLED" == "true" ]]; then
        metrics_status_update "checking" "Running pre-flight checks"
    fi

    # Pre-flight: NAS mount.
    # BUG FIX: the original ran check_nas_mount twice (a duplicated block
    # later in main) and referenced an undefined $MOUNT_POINT variable in
    # the failure notification.
    if ! check_nas_mount; then
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_backup_complete "failed" "NAS mount not accessible"
        fi
        send_notification "Media Backup Failed" "NAS mount not accessible at /mnt/share/media" "error" 0 1
        exit 1
    fi

    if ! check_disk_space; then
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_backup_complete "failed" "Insufficient disk space"
        fi
        send_notification "Media Backup Failed" "Insufficient disk space" "error" 0 1
        exit 1
    fi

    # Check if Docker is running
    if ! docker info >/dev/null 2>&1; then
        log_error "Docker is not running or accessible"
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_backup_complete "failed" "Docker is not accessible"
        fi
        send_notification "Media Backup Failed" "Docker is not accessible" "error" 0 1
        exit 1
    fi

    local success_count=0
    local failed_count=0
    local backup_results=()

    if [ "$PARALLEL_BACKUPS" == true ]; then
        log_message "Running backups in parallel mode"

        # Update metrics for parallel backup phase
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_status_update "backing_up" "Running media service backups in parallel"
        fi

        # Create temporary file for collecting results
        local temp_results
        temp_results=$(mktemp)
        local pids=()

        # Start backup jobs in parallel.
        # NOTE(review): backup_service_wrapper is not defined in this file —
        # presumably provided by the sourced metrics library; verify.
        for service in "${!MEDIA_SERVICES[@]}"; do
            backup_service_wrapper "$service" "$temp_results" &
            pids+=($!)
            log_info "Started backup job for $service (PID: $!)"
        done

        # Wait for all jobs to complete.
        # BUG FIX: a bare `wait "$pid"` propagates a failed job's status and
        # aborts the whole run under `set -e`; failures are accounted for via
        # the results file instead.
        for pid in "${pids[@]}"; do
            wait "$pid" || true
            log_info "Backup job completed (PID: $pid)"
        done

        # Collect results.
        # BUG FIX: ((success_count++)) returns exit status 1 on the first
        # increment (value 0), terminating the script under `set -e`; use
        # arithmetic assignment instead.
        while IFS= read -r result; do
            if [[ "$result" == SUCCESS:* ]]; then
                success_count=$((success_count + 1))
                backup_results+=("✓ ${result#SUCCESS:}")
            elif [[ "$result" == FAILED:* ]]; then
                failed_count=$((failed_count + 1))
                backup_results+=("✗ ${result#FAILED:}")
            fi
        done < "$temp_results"

        rm -f "$temp_results"

    else
        log_message "Running backups in sequential mode"

        # Update metrics for sequential backup phase
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_status_update "backing_up" "Running media service backups sequentially"
        fi

        # Run backups sequentially (same set -e-safe counters as above)
        for service in "${!MEDIA_SERVICES[@]}"; do
            if backup_service "$service"; then
                success_count=$((success_count + 1))
                backup_results+=("✓ $service")
            else
                failed_count=$((failed_count + 1))
                backup_results+=("✗ $service")
            fi
        done
    fi

    # Calculate total time
    local script_end_time
    script_end_time=$(date +%s)
    local total_time=$((script_end_time - script_start_time))

    # Track overall performance
    track_performance "full_media_backup" "$script_start_time" "$script_end_time"

    # Update metrics for cleanup phase
    if [[ "$METRICS_ENABLED" == "true" ]]; then
        if [ "$DRY_RUN" != true ]; then
            metrics_status_update "cleaning_up" "Cleaning up old backup files"
        else
            metrics_status_update "completed" "Dry run completed successfully"
        fi
    fi

    # Clean up old backups (only if not dry run)
    if [ "$DRY_RUN" != true ]; then
        cleanup_old_backups
    fi

    # Generate summary report
    generate_summary_report "$success_count" "$failed_count" "$total_time"

    # Add results to markdown log
    {
        echo "## Backup Results"
        for result in "${backup_results[@]}"; do
            echo "- $result"
        done
        echo ""
        echo "**Completed**: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "**Duration**: ${total_time}s"
    } >> "$MARKDOWN_LOG"

    # Send notification
    local status="success"
    local message="Media backup completed"

    if [ "$failed_count" -gt 0 ]; then
        status="warning"
        message="Media backup completed with $failed_count failures"
    fi

    if [ "$DRY_RUN" == true ]; then
        message="Media backup dry run completed"
        status="info"
    fi

    send_notification "Media Backup Complete" "$message" "$status" "$success_count" "$failed_count"

    # Finalize metrics
    if [[ "$METRICS_ENABLED" == "true" ]]; then
        if [ "$failed_count" -gt 0 ]; then
            metrics_backup_complete "completed_with_errors" "Media backup completed with $failed_count failures"
        elif [ "$DRY_RUN" == true ]; then
            metrics_backup_complete "success" "Media backup dry run completed successfully"
        else
            metrics_backup_complete "success" "Media backup completed successfully"
        fi
    fi

    # Exit with error code if any backups failed
    if [ "$failed_count" -gt 0 ]; then
        exit 1
    fi

    log_success "All media backups completed successfully!"
    exit 0
}
|
|
|
|
# Trap to handle script interruption
|
|
# Trap to handle script interruption (Ctrl-C / TERM): log and exit 130
trap 'log_error "Script interrupted"; exit 130' INT TERM

# Run main function with the original CLI arguments
main "$@"
|