mirror of
https://github.com/acedanger/shell.git
synced 2025-12-06 05:40:11 -08:00
feat: Add enhanced backup-media script and documentation
- Introduced demo-enhanced-backup.sh to showcase new features.
- Created backup-media-enhancement-summary.md for a side-by-side comparison of the original and enhanced scripts.
- Developed enhanced-media-backup.md detailing features, usage, configuration, and error handling of the new backup script.
- Enhanced logging, error handling, and performance monitoring capabilities.
- Added support for multiple media services with improved safety and maintenance features.
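For scheduling, a minimal sketch (paths are illustrative, not part of this commit; adjust to where the repo is checked out):

    # hypothetical crontab entry: nightly sequential backup at 03:30
    30 3 * * * /opt/shell/backup-media.sh --sequential >> /var/log/backup-media-cron.log 2>&1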
This commit is contained in:
backup-media.sh
@@ -1,49 +1,773 @@
#!/bin/bash
set -e

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Performance tracking variables
SCRIPT_START_TIME=$(date +%s)
BACKUP_START_TIME=""
VERIFICATION_START_TIME=""

# Configuration
MAX_BACKUP_AGE_DAYS=30
MAX_BACKUPS_TO_KEEP=10
BACKUP_ROOT="/mnt/share/media/backups"
LOG_ROOT="/mnt/share/media/backups/logs"
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
JSON_LOG_FILE="${SCRIPT_DIR}/logs/media-backup.json"
PERFORMANCE_LOG_FILE="${SCRIPT_DIR}/logs/media-backup-performance.json"

# Script options
PARALLEL_BACKUPS=true
VERIFY_BACKUPS=true
PERFORMANCE_MONITORING=true
WEBHOOK_URL="https://notify.peterwood.rocks/lab"
INTERACTIVE_MODE=false
DRY_RUN=false

# Show help function
show_help() {
    cat << EOF
Media Services Backup Script

Usage: $0 [OPTIONS]

OPTIONS:
    --dry-run       Show what would be backed up without actually doing it
    --no-verify     Skip backup verification
    --sequential    Run backups sequentially instead of in parallel
    --interactive   Ask for confirmation before each backup
    --webhook URL   Custom webhook URL for notifications
    -h, --help      Show this help message

EXAMPLES:
    $0                # Run full backup with default settings
    $0 --dry-run      # Preview what would be backed up
    $0 --sequential   # Run backups one at a time
    $0 --no-verify    # Skip verification for faster backup

SERVICES BACKED UP:
    - Sonarr (TV Shows)
    - Radarr (Movies)
    - Prowlarr (Indexers)
    - Audiobookshelf (Audiobooks)
    - Tautulli (Plex Statistics)
    - SABnzbd (Downloads)
    - Jellyseerr (Requests)

EOF
}

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --dry-run)
            DRY_RUN=true
            shift
            ;;
        --no-verify)
            VERIFY_BACKUPS=false
            shift
            ;;
        --sequential)
            PARALLEL_BACKUPS=false
            shift
            ;;
        --interactive)
            INTERACTIVE_MODE=true
            shift
            ;;
        --webhook)
            WEBHOOK_URL="$2"
            shift 2
            ;;
        -h|--help)
            show_help
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            show_help
            exit 1
            ;;
    esac
done

# Create necessary directories
mkdir -p "${SCRIPT_DIR}/logs"
mkdir -p "${BACKUP_ROOT}"/{sonarr,radarr,prowlarr,audiobookshelf,tautulli,sabnzbd,jellyseerr}

# Log files
LOG_FILE="${LOG_ROOT}/media-backup-$(date +%Y%m%d_%H%M%S).log"
MARKDOWN_LOG="${LOG_ROOT}/media-backup-$(date +%Y%m%d_%H%M%S).md"

# Define media services and their backup configurations
declare -A MEDIA_SERVICES=(
    ["sonarr"]="/config/Backups/scheduled"
    ["radarr"]="/config/Backups/scheduled"
    ["prowlarr"]="/config/Backups/scheduled"
    ["audiobookshelf"]="/metadata/backups"
    ["tautulli"]="/config/backups"
    ["sabnzbd"]="/config/sabnzbd.ini"
    ["jellyseerr_db"]="/config/db/"
    ["jellyseerr_settings"]="/config/settings.json"
)

# Service-specific backup destinations
declare -A BACKUP_DESTINATIONS=(
    ["sonarr"]="${BACKUP_ROOT}/sonarr/"
    ["radarr"]="${BACKUP_ROOT}/radarr/"
    ["prowlarr"]="${BACKUP_ROOT}/prowlarr/"
    ["audiobookshelf"]="${BACKUP_ROOT}/audiobookshelf/"
    ["tautulli"]="${BACKUP_ROOT}/tautulli/"
    ["sabnzbd"]="${BACKUP_ROOT}/sabnzbd/sabnzbd_$(date +%Y%m%d).ini"
    ["jellyseerr_db"]="${BACKUP_ROOT}/jellyseerr/backup_$(date +%Y%m%d)/"
    ["jellyseerr_settings"]="${BACKUP_ROOT}/jellyseerr/backup_$(date +%Y%m%d)/"
)
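# To add another service (hypothetical example, not part of this commit):
# register it in both maps and in the mkdir -p line above, e.g. for Lidarr:
#   MEDIA_SERVICES["lidarr"]="/config/Backups/scheduled"
#   BACKUP_DESTINATIONS["lidarr"]="${BACKUP_ROOT}/lidarr/"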
# Logging functions
log_message() {
    local message="$1"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${CYAN}[${timestamp}]${NC} ${message}"
    echo "[${timestamp}] $message" >> "${LOG_FILE}" 2>/dev/null || true
}

log_error() {
    local message="$1"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}" >&2
    echo "[${timestamp}] ERROR: $message" >> "${LOG_FILE}" 2>/dev/null || true
}

log_success() {
    local message="$1"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}"
    echo "[${timestamp}] SUCCESS: $message" >> "${LOG_FILE}" 2>/dev/null || true
}

log_warning() {
    local message="$1"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}"
    echo "[${timestamp}] WARNING: $message" >> "${LOG_FILE}" 2>/dev/null || true
}

log_info() {
    local message="$1"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${BLUE}[${timestamp}] INFO:${NC} ${message}"
    echo "[${timestamp}] INFO: $message" >> "${LOG_FILE}" 2>/dev/null || true
}

# Performance tracking functions
track_performance() {
    if [ "$PERFORMANCE_MONITORING" != true ]; then
        return 0
    fi

    local operation="$1"
    local start_time="$2"
    local end_time="${3:-$(date +%s)}"
    local duration=$((end_time - start_time))

    # Initialize performance log if it doesn't exist
    if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then
        echo "[]" > "$PERFORMANCE_LOG_FILE"
    fi

    # Add performance entry with lock protection
    local entry=$(jq -n \
        --arg timestamp "$(date -Iseconds)" \
        --arg operation "$operation" \
        --arg duration "$duration" \
        --arg hostname "$(hostname)" \
        '{
            timestamp: $timestamp,
            operation: $operation,
            duration: ($duration | tonumber),
            hostname: $hostname
        }')

    if command -v jq > /dev/null 2>&1; then
        local lock_file="${PERFORMANCE_LOG_FILE}.lock"
        local max_wait=10
        local wait_count=0

        while [ $wait_count -lt $max_wait ]; do
            if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
                break
            fi
            sleep 0.1
            # Arithmetic assignment instead of ((wait_count++)): the latter
            # returns nonzero when the old value is 0 and would trip set -e
            wait_count=$((wait_count + 1))
        done

        if [ $wait_count -lt $max_wait ]; then
            if jq --argjson entry "$entry" '. += [$entry]' "$PERFORMANCE_LOG_FILE" > "${PERFORMANCE_LOG_FILE}.tmp" 2>/dev/null; then
                mv "${PERFORMANCE_LOG_FILE}.tmp" "$PERFORMANCE_LOG_FILE"
            else
                rm -f "${PERFORMANCE_LOG_FILE}.tmp"
            fi
            rm -f "$lock_file"
        fi
    fi

    log_info "Performance: $operation completed in ${duration}s"
}

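# Example query of the performance log (assumes jq is installed; illustrative
# only): average backup duration per operation across runs
#   jq 'group_by(.operation) | map({operation: .[0].operation, avg_seconds: (map(.duration) | add / length)})' \
#     "$PERFORMANCE_LOG_FILE"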
# Initialize JSON log file
initialize_json_log() {
    if [ ! -f "${JSON_LOG_FILE}" ] || ! jq empty "${JSON_LOG_FILE}" 2>/dev/null; then
        echo "{}" > "${JSON_LOG_FILE}"
        log_message "Initialized JSON log file"
    fi
}

# Enhanced function to log file details with markdown formatting
log_file_details() {
    local service="$1"
    local src="$2"
    local dest="$3"
    local status="$4"
    local size=""
    local checksum=""

    # Calculate size if backup was successful
    if [ "$status" == "SUCCESS" ] && [ -e "$dest" ]; then
        size=$(du -sh "$dest" 2>/dev/null | cut -f1 || echo "Unknown")
        if [ "$VERIFY_BACKUPS" == true ]; then
            checksum=$(find "$dest" -type f -exec md5sum {} \; 2>/dev/null | md5sum | cut -d' ' -f1 || echo "N/A")
        fi
    else
        size="N/A"
        checksum="N/A"
    fi

    # Use a lock file for markdown log to prevent race conditions
    local markdown_lock="${MARKDOWN_LOG}.lock"
    local max_wait=30
    local wait_count=0

    while [ $wait_count -lt $max_wait ]; do
        if (set -C; echo $$ > "$markdown_lock") 2>/dev/null; then
            break
        fi
        sleep 0.1
        wait_count=$((wait_count + 1))  # safe under set -e, unlike ((wait_count++))
    done

    if [ $wait_count -lt $max_wait ]; then
        # Log to markdown file safely
        {
            echo "## $service Backup"
            echo "- **Status**: $status"
            echo "- **Source**: \`$src\`"
            echo "- **Destination**: \`$dest\`"
            echo "- **Size**: $size"
            echo "- **Checksum**: $checksum"
            echo "- **Timestamp**: $(date '+%Y-%m-%d %H:%M:%S')"
            echo ""
        } >> "$MARKDOWN_LOG"

        rm -f "$markdown_lock"
    else
        log_warning "Could not acquire markdown log lock for $service"
    fi

    # Log to JSON
    if command -v jq > /dev/null 2>&1; then
        update_backup_log "$service" "$src" "$dest" "$status" "$size" "$checksum"
    fi
}

# Update backup log in JSON format
update_backup_log() {
    local service="$1"
    local src="$2"
    local dest="$3"
    local status="$4"
    local size="$5"
    local checksum="$6"
    local timestamp=$(date -Iseconds)

    if ! command -v jq > /dev/null 2>&1; then
        return 0
    fi

    # Use a lock file for parallel safety
    local lock_file="${JSON_LOG_FILE}.lock"
    local max_wait=30
    local wait_count=0

    while [ $wait_count -lt $max_wait ]; do
        if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
            break
        fi
        sleep 0.1
        wait_count=$((wait_count + 1))  # safe under set -e, unlike ((wait_count++))
    done

    if [ $wait_count -ge $max_wait ]; then
        log_warning "Could not acquire lock for JSON log update"
        return 1
    fi

    # Create entry for this backup
    local entry=$(jq -n \
        --arg service "$service" \
        --arg src "$src" \
        --arg dest "$dest" \
        --arg status "$status" \
        --arg size "$size" \
        --arg checksum "$checksum" \
        --arg timestamp "$timestamp" \
        '{
            service: $service,
            source: $src,
            destination: $dest,
            status: $status,
            size: $size,
            checksum: $checksum,
            timestamp: $timestamp
        }')

    # Update JSON log safely
    if jq --argjson entry "$entry" --arg service "$service" \
        '.[$service] = $entry' "$JSON_LOG_FILE" > "${JSON_LOG_FILE}.tmp" 2>/dev/null; then
        mv "${JSON_LOG_FILE}.tmp" "$JSON_LOG_FILE"
    else
        rm -f "${JSON_LOG_FILE}.tmp"
    fi

    # Remove lock file
    rm -f "$lock_file"
}

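# Example query of the JSON status log (assumes jq; illustrative only):
# list each service with its most recently recorded status
#   jq 'to_entries | map({service: .key, status: .value.status, timestamp: .value.timestamp})' "$JSON_LOG_FILE"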
# Check if Docker container is running
check_container_running() {
    local container="$1"

    # Plain {{.Names}} format (no "table" prefix) keeps the header row out of the output
    if ! docker ps --format '{{.Names}}' | grep -q "^${container}$"; then
        log_warning "Container '$container' is not running"
        return 1
    fi

    return 0
}

# Verify backup integrity
verify_backup() {
    local src_container="$1"
    local src_path="$2"
    local dest_path="$3"

    if [ "$VERIFY_BACKUPS" != true ]; then
        return 0
    fi

    log_info "Verifying backup integrity for $src_container:$src_path"

    # For files, compare checksums; if the destination is a directory,
    # docker cp placed the file inside it, so point at that file
    if [[ "$src_path" == *.ini ]] || [[ "$src_path" == *.json ]]; then
        local dest_file="$dest_path"
        if [ -d "$dest_path" ]; then
            dest_file="${dest_path%/}/$(basename "$src_path")"
        fi
        local src_checksum=$(docker exec "$src_container" md5sum "$src_path" 2>/dev/null | cut -d' ' -f1 || echo "")
        local dest_checksum=$(md5sum "$dest_file" 2>/dev/null | cut -d' ' -f1 || echo "")

        if [ -n "$src_checksum" ] && [ -n "$dest_checksum" ] && [ "$src_checksum" == "$dest_checksum" ]; then
            log_success "Backup verification passed for $src_container:$src_path"
            return 0
        else
            log_error "Backup verification failed for $src_container:$src_path"
            return 1
        fi
    fi

    # For directories, check if they exist and have content
    if [ -d "$dest_path" ]; then
        local file_count=$(find "$dest_path" -type f 2>/dev/null | wc -l)
        if [ "$file_count" -gt 0 ]; then
            log_success "Backup verification passed for $src_container:$src_path ($file_count files)"
            return 0
        else
            log_error "Backup verification failed: no files found in $dest_path"
            return 1
        fi
    fi

    log_warning "Unable to verify backup for $src_container:$src_path"
    return 0
}

# Backup a single service
backup_service() {
    local service="$1"
    local container="$1"
    local backup_start_time=$(date +%s)

    log_message "Starting backup for service: $service"

    # Handle special cases for container names
    case "$service" in
        jellyseerr_db|jellyseerr_settings)
            container="jellyseerr"
            ;;
    esac

    # Check if container is running
    if ! check_container_running "$container"; then
        log_file_details "$service" "${container}:${MEDIA_SERVICES[$service]}" "${BACKUP_DESTINATIONS[$service]}" "FAILED - Container not running"
        return 1
    fi

    local src_path="${MEDIA_SERVICES[$service]}"
    local dest_path="${BACKUP_DESTINATIONS[$service]}"

    # Create destination directory for jellyseerr
    if [[ "$service" == jellyseerr_* ]]; then
        mkdir -p "$(dirname "$dest_path")"
    fi

    # Perform the backup
    if [ "$DRY_RUN" == true ]; then
        log_info "DRY RUN: Would backup $container:$src_path to $dest_path"
        log_file_details "$service" "$container:$src_path" "$dest_path" "DRY RUN"
        return 0
    fi

    if [ "$INTERACTIVE_MODE" == true ]; then
        echo -n "Backup $service? (y/N): "
        read -r response
        if [[ ! "$response" =~ ^[Yy]$ ]]; then
            log_info "Skipping $service backup (user choice)"
            return 0
        fi
    fi

    # Execute docker cp, appending its output to the log directly; piping
    # through tee would make the if test tee's exit status, not docker's
    log_info "Executing: docker cp $container:$src_path $dest_path"

    if docker cp "$container:$src_path" "$dest_path" >> "$LOG_FILE" 2>&1; then
        log_success "Backup completed for $service"

        # Verify the backup
        if verify_backup "$container" "$src_path" "$dest_path"; then
            log_file_details "$service" "$container:$src_path" "$dest_path" "SUCCESS"
            track_performance "backup_${service}" "$backup_start_time"
            return 0
        else
            log_file_details "$service" "$container:$src_path" "$dest_path" "VERIFICATION_FAILED"
            return 1
        fi
    else
        log_error "Backup failed for $service"
        log_file_details "$service" "$container:$src_path" "$dest_path" "FAILED"
        return 1
    fi
}

# Backup service wrapper for parallel execution
backup_service_wrapper() {
    local service="$1"
    local temp_file="$2"

    if backup_service "$service"; then
        echo "SUCCESS:$service" >> "$temp_file"
    else
        echo "FAILED:$service" >> "$temp_file"
    fi
}

# Clean old backups based on age and count
cleanup_old_backups() {
    log_message "Cleaning up old backups..."

    for service_dir in "${BACKUP_ROOT}"/*; do
        if [ ! -d "$service_dir" ]; then
            continue
        fi

        local service=$(basename "$service_dir")
        log_info "Cleaning up old backups for $service"

        # Remove backups older than MAX_BACKUP_AGE_DAYS
        find "$service_dir" -type f -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
        find "$service_dir" -type d -empty -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true

        # Keep only the most recent MAX_BACKUPS_TO_KEEP backups; the
        # parentheses group the -name tests so -type f applies to both
        # patterns, and xargs -r avoids running rm with no arguments
        find "$service_dir" -type f \( -name "*.ini" -o -name "*.json" \) | sort -r | tail -n +$((MAX_BACKUPS_TO_KEEP + 1)) | xargs -r rm -f 2>/dev/null || true

        # Clean up old dated directories (for jellyseerr)
        find "$service_dir" -type d -name "backup_*" | sort -r | tail -n +$((MAX_BACKUPS_TO_KEEP + 1)) | xargs -r rm -rf 2>/dev/null || true
    done

    # Clean up old log files
    find "$LOG_ROOT" -name "media-backup-*.log" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
    find "$LOG_ROOT" -name "media-backup-*.md" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true

    log_success "Cleanup completed"
}

# Check disk space
check_disk_space() {
    local required_space_mb=1000 # Minimum 1GB free space

    # df -Pk forces POSIX single-line output in 1K blocks
    local available_space_kb=$(df -Pk "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
    local available_space_mb=$((available_space_kb / 1024))

    if [ "$available_space_mb" -lt "$required_space_mb" ]; then
        log_error "Insufficient disk space. Available: ${available_space_mb}MB, Required: ${required_space_mb}MB"
        return 1
    fi

    log_info "Disk space check passed. Available: ${available_space_mb}MB"
    return 0
}

# Send enhanced notification
send_notification() {
    local title="$1"
    local message="$2"
    local status="${3:-info}"
    local hostname=$(hostname)
    local total_services=${#MEDIA_SERVICES[@]}
    local success_count="$4"
    local failed_count="$5"

    # Enhanced message with statistics
    local enhanced_message="$message\n\nServices: $total_services\nSuccessful: $success_count\nFailed: $failed_count\nHost: $hostname"

    # Console notification
    case "$status" in
        "success") log_success "$title: $enhanced_message" ;;
        "error") log_error "$title: $enhanced_message" ;;
        "warning") log_warning "$title: $enhanced_message" ;;
        *) log_info "$title: $enhanced_message" ;;
    esac

    # Webhook notification
    if [ -n "$WEBHOOK_URL" ] && [ "$DRY_RUN" != true ]; then
        local tags="backup,media,${hostname}"
        [ "$failed_count" -gt 0 ] && tags="${tags},errors"

        curl -s \
            -H "tags:${tags}" \
            -d "$enhanced_message" \
            "$WEBHOOK_URL" 2>/dev/null || log_warning "Failed to send webhook notification"
    fi
}

# Generate backup summary report
generate_summary_report() {
    local success_count="$1"
    local failed_count="$2"
    local total_time="$3"

    log_message "=== BACKUP SUMMARY REPORT ==="
    log_message "Total Services: ${#MEDIA_SERVICES[@]}"
    log_message "Successful Backups: $success_count"
    log_message "Failed Backups: $failed_count"
    log_message "Total Time: ${total_time}s"
    log_message "Log File: $LOG_FILE"
    log_message "Markdown Report: $MARKDOWN_LOG"

    if [ "$PERFORMANCE_MONITORING" == true ]; then
        log_message "Performance Log: $PERFORMANCE_LOG_FILE"
    fi

    # Add summary to markdown log
    {
        echo "# Media Backup Summary Report"
        echo "**Date**: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "**Host**: $(hostname)"
        echo "**Total Services**: ${#MEDIA_SERVICES[@]}"
        echo "**Successful**: $success_count"
        echo "**Failed**: $failed_count"
        echo "**Duration**: ${total_time}s"
        echo ""
    } >> "$MARKDOWN_LOG"
}

# Main backup execution function
main() {
    local script_start_time=$(date +%s)

    log_message "=== MEDIA SERVICES BACKUP STARTED ==="
    log_message "Host: $(hostname)"
    log_message "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
    log_message "Dry Run: $DRY_RUN"
    log_message "Parallel Mode: $PARALLEL_BACKUPS"
    log_message "Verify Backups: $VERIFY_BACKUPS"

    # Initialize logging
    initialize_json_log

    # Initialize markdown log
    {
        echo "# Media Services Backup Report"
        echo "**Started**: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "**Host**: $(hostname)"
        echo ""
    } > "$MARKDOWN_LOG"

    # Pre-flight checks
    if ! check_disk_space; then
        send_notification "Media Backup Failed" "Insufficient disk space" "error" 0 1
        exit 1
    fi

    # Check if Docker is running
    if ! docker info >/dev/null 2>&1; then
        log_error "Docker is not running or accessible"
        send_notification "Media Backup Failed" "Docker is not accessible" "error" 0 1
        exit 1
    fi

    local success_count=0
    local failed_count=0
    local backup_results=()

    if [ "$PARALLEL_BACKUPS" == true ]; then
        log_message "Running backups in parallel mode"

        # Create temporary file for collecting results
        local temp_results=$(mktemp)
        local pids=()

        # Start backup jobs in parallel
        for service in "${!MEDIA_SERVICES[@]}"; do
            backup_service_wrapper "$service" "$temp_results" &
            pids+=($!)
            log_info "Started backup job for $service (PID: $!)"
        done

        # Wait for all jobs to complete
        for pid in "${pids[@]}"; do
            wait "$pid"
            log_info "Backup job completed (PID: $pid)"
        done

        # Collect results; arithmetic assignment instead of ((count++)),
        # which returns nonzero when the old value is 0 and would trip set -e
        while IFS= read -r result; do
            if [[ "$result" == SUCCESS:* ]]; then
                success_count=$((success_count + 1))
                backup_results+=("✓ ${result#SUCCESS:}")
            elif [[ "$result" == FAILED:* ]]; then
                failed_count=$((failed_count + 1))
                backup_results+=("✗ ${result#FAILED:}")
            fi
        done < "$temp_results"

        rm -f "$temp_results"

    else
        log_message "Running backups in sequential mode"

        # Run backups sequentially
        for service in "${!MEDIA_SERVICES[@]}"; do
            if backup_service "$service"; then
                success_count=$((success_count + 1))
                backup_results+=("✓ $service")
            else
                failed_count=$((failed_count + 1))
                backup_results+=("✗ $service")
            fi
        done
    fi

    # Calculate total time
    local script_end_time=$(date +%s)
    local total_time=$((script_end_time - script_start_time))

    # Track overall performance
    track_performance "full_media_backup" "$script_start_time" "$script_end_time"

    # Clean up old backups (only if not dry run)
    if [ "$DRY_RUN" != true ]; then
        cleanup_old_backups
    fi

    # Generate summary report
    generate_summary_report "$success_count" "$failed_count" "$total_time"

    # Add results to markdown log
    {
        echo "## Backup Results"
        for result in "${backup_results[@]}"; do
            echo "- $result"
        done
        echo ""
        echo "**Completed**: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "**Duration**: ${total_time}s"
    } >> "$MARKDOWN_LOG"

    # Send notification
    local status="success"
    local message="Media backup completed"

    if [ "$failed_count" -gt 0 ]; then
        status="warning"
        message="Media backup completed with $failed_count failures"
    fi

    if [ "$DRY_RUN" == true ]; then
        message="Media backup dry run completed"
        status="info"
    fi

    send_notification "Media Backup Complete" "$message" "$status" "$success_count" "$failed_count"

    # Exit with error code if any backups failed
    if [ "$failed_count" -gt 0 ]; then
        exit 1
    fi

    log_success "All media backups completed successfully!"
    exit 0
}

# Trap to handle script interruption
trap 'log_error "Script interrupted"; exit 130' INT TERM

# Run main function
main "$@"