mirror of
https://github.com/acedanger/shell.git
synced 2025-12-06 00:00:13 -08:00
Refactor variable assignments and improve script readability in validate-plex-backups.sh and validate-plex-recovery.sh
- Changed inline variable assignments to separate declaration and assignment for clarity.
- Updated condition checks and log messages for better readability and consistency.
- Added a backup of validate-plex-recovery.sh for safety.
- Introduced a new script run-docker-tests.sh for testing setup in Docker containers.
- Enhanced ssh-login.sh to improve condition checks and logging functionality.
This commit is contained in:
257
backup-media.sh
257
backup-media.sh
@@ -11,10 +11,6 @@ CYAN='\033[0;36m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Performance tracking variables
|
||||
SCRIPT_START_TIME=$(date +%s)
|
||||
BACKUP_START_TIME=""
|
||||
VERIFICATION_START_TIME=""
|
||||
|
||||
# Configuration
|
||||
MAX_BACKUP_AGE_DAYS=30
|
||||
MAX_BACKUPS_TO_KEEP=10
|
||||
@@ -132,71 +128,43 @@ declare -A BACKUP_DESTINATIONS=(
|
||||
["jellyseerr_settings"]="${BACKUP_ROOT}/jellyseerr/backup_$(date +%Y%m%d)/"
|
||||
)
|
||||
|
||||
# Show help function
|
||||
# Print usage, options, examples, and the list of backed-up services.
show_help() {
    cat << EOF
Media Services Backup Script

Usage: $0 [OPTIONS]

OPTIONS:
    --dry-run       Show what would be backed up without actually doing it
    --no-verify     Skip backup verification
    --sequential    Run backups sequentially instead of in parallel
    --interactive   Ask for confirmation before each backup
    --webhook URL   Custom webhook URL for notifications
    -h, --help      Show this help message

EXAMPLES:
    $0                  # Run full backup with default settings
    $0 --dry-run        # Preview what would be backed up
    $0 --sequential     # Run backups one at a time
    $0 --no-verify      # Skip verification for faster backup

SERVICES BACKED UP:
    - Sonarr (TV Shows)
    - Radarr (Movies)
    - Prowlarr (Indexers)
    - Audiobookshelf (Audiobooks)
    - Tautulli (Plex Statistics)
    - SABnzbd (Downloads)
    - Jellyseerr (Requests)

EOF
}
|
||||
|
||||
# Logging functions
|
||||
# Log a timestamped message to stdout (cyan) and append it to $LOG_FILE.
# Globals:   CYAN, NC (color escape codes), LOG_FILE (append target)
# Arguments: $1 - message text
log_message() {
    local message="$1"
    local timestamp
    # Declaration and assignment are separated so the 'local' builtin's
    # exit status cannot mask a failing command substitution.
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${CYAN}[${timestamp}]${NC} ${message}"
    # Best-effort file logging: an unwritable log must not abort the backup.
    echo "[${timestamp}] $message" >> "${LOG_FILE}" 2>/dev/null || true
}
|
||||
|
||||
# Log a timestamped ERROR message to stderr (red) and append it to $LOG_FILE.
# Globals:   RED, NC (color escape codes), LOG_FILE (append target)
# Arguments: $1 - message text
log_error() {
    local message="$1"
    local timestamp
    # Declaration and assignment are separated so the 'local' builtin's
    # exit status cannot mask a failing command substitution.
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}" >&2
    # Best-effort file logging: an unwritable log must not abort the backup.
    echo "[${timestamp}] ERROR: $message" >> "${LOG_FILE}" 2>/dev/null || true
}
|
||||
|
||||
# Log a timestamped SUCCESS message to stdout (green) and append it to $LOG_FILE.
# Globals:   GREEN, NC (color escape codes), LOG_FILE (append target)
# Arguments: $1 - message text
log_success() {
    local message="$1"
    local timestamp
    # Declaration and assignment are separated so the 'local' builtin's
    # exit status cannot mask a failing command substitution.
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}"
    # Best-effort file logging: an unwritable log must not abort the backup.
    echo "[${timestamp}] SUCCESS: $message" >> "${LOG_FILE}" 2>/dev/null || true
}
|
||||
|
||||
# Log a timestamped WARNING message to stdout (yellow) and append it to $LOG_FILE.
# Globals:   YELLOW, NC (color escape codes), LOG_FILE (append target)
# Arguments: $1 - message text
log_warning() {
    local message="$1"
    local timestamp
    # Declaration and assignment are separated so the 'local' builtin's
    # exit status cannot mask a failing command substitution.
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}"
    # Best-effort file logging: an unwritable log must not abort the backup.
    echo "[${timestamp}] WARNING: $message" >> "${LOG_FILE}" 2>/dev/null || true
}
|
||||
|
||||
# Log a timestamped INFO message to stdout (blue) and append it to $LOG_FILE.
# Globals:   BLUE, NC (color escape codes), LOG_FILE (append target)
# Arguments: $1 - message text
log_info() {
    local message="$1"
    local timestamp
    # Declaration and assignment are separated so the 'local' builtin's
    # exit status cannot mask a failing command substitution.
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${BLUE}[${timestamp}] INFO:${NC} ${message}"
    # Best-effort file logging: an unwritable log must not abort the backup.
    echo "[${timestamp}] INFO: $message" >> "${LOG_FILE}" 2>/dev/null || true
}
|
||||
@@ -206,19 +174,20 @@ track_performance() {
|
||||
if [ "$PERFORMANCE_MONITORING" != true ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
local operation="$1"
|
||||
local start_time="$2"
|
||||
local end_time="${3:-$(date +%s)}"
|
||||
local duration=$((end_time - start_time))
|
||||
|
||||
|
||||
# Initialize performance log if it doesn't exist
|
||||
if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then
|
||||
echo "[]" > "$PERFORMANCE_LOG_FILE"
|
||||
fi
|
||||
|
||||
|
||||
# Add performance entry with lock protection
|
||||
local entry=$(jq -n \
|
||||
local entry
|
||||
entry=$(jq -n \
|
||||
--arg timestamp "$(date -Iseconds)" \
|
||||
--arg operation "$operation" \
|
||||
--arg duration "$duration" \
|
||||
@@ -229,12 +198,12 @@ track_performance() {
|
||||
duration: ($duration | tonumber),
|
||||
hostname: $hostname
|
||||
}')
|
||||
|
||||
|
||||
if command -v jq > /dev/null 2>&1; then
|
||||
local lock_file="${PERFORMANCE_LOG_FILE}.lock"
|
||||
local max_wait=10
|
||||
local wait_count=0
|
||||
|
||||
|
||||
while [ $wait_count -lt $max_wait ]; do
|
||||
if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
|
||||
break
|
||||
@@ -242,7 +211,7 @@ track_performance() {
|
||||
sleep 0.1
|
||||
((wait_count++))
|
||||
done
|
||||
|
||||
|
||||
if [ $wait_count -lt $max_wait ]; then
|
||||
if jq --argjson entry "$entry" '. += [$entry]' "$PERFORMANCE_LOG_FILE" > "${PERFORMANCE_LOG_FILE}.tmp" 2>/dev/null; then
|
||||
mv "${PERFORMANCE_LOG_FILE}.tmp" "$PERFORMANCE_LOG_FILE"
|
||||
@@ -252,7 +221,7 @@ track_performance() {
|
||||
rm -f "$lock_file"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
log_info "Performance: $operation completed in ${duration}s"
|
||||
}
|
||||
|
||||
@@ -272,7 +241,7 @@ log_file_details() {
|
||||
local status="$4"
|
||||
local size=""
|
||||
local checksum=""
|
||||
|
||||
|
||||
# Calculate size if backup was successful
|
||||
if [ "$status" == "SUCCESS" ] && [ -e "$dest" ]; then
|
||||
size=$(du -sh "$dest" 2>/dev/null | cut -f1 || echo "Unknown")
|
||||
@@ -283,12 +252,12 @@ log_file_details() {
|
||||
size="N/A"
|
||||
checksum="N/A"
|
||||
fi
|
||||
|
||||
|
||||
# Use a lock file for markdown log to prevent race conditions
|
||||
local markdown_lock="${MARKDOWN_LOG}.lock"
|
||||
local max_wait=30
|
||||
local wait_count=0
|
||||
|
||||
|
||||
while [ $wait_count -lt $max_wait ]; do
|
||||
if (set -C; echo $$ > "$markdown_lock") 2>/dev/null; then
|
||||
break
|
||||
@@ -296,25 +265,25 @@ log_file_details() {
|
||||
sleep 0.1
|
||||
((wait_count++))
|
||||
done
|
||||
|
||||
|
||||
if [ $wait_count -lt $max_wait ]; then
|
||||
# Log to markdown file safely
|
||||
{
|
||||
echo "## $service Backup"
|
||||
echo "- **Status**: $status"
|
||||
echo "- **Source**: \`$src\`"
|
||||
echo "- **Destination**: \`$dest\`"
|
||||
echo "- **Source**: \$($src\)"
|
||||
echo "- **Destination**: \$($dest\)"
|
||||
echo "- **Size**: $size"
|
||||
echo "- **Checksum**: $checksum"
|
||||
echo "- **Timestamp**: $(date '+%Y-%m-%d %H:%M:%S')"
|
||||
echo ""
|
||||
} >> "$MARKDOWN_LOG"
|
||||
|
||||
|
||||
rm -f "$markdown_lock"
|
||||
else
|
||||
log_warning "Could not acquire markdown log lock for $service"
|
||||
fi
|
||||
|
||||
|
||||
# Log to JSON
|
||||
if command -v jq > /dev/null 2>&1; then
|
||||
update_backup_log "$service" "$src" "$dest" "$status" "$size" "$checksum"
|
||||
@@ -329,17 +298,18 @@ update_backup_log() {
|
||||
local status="$4"
|
||||
local size="$5"
|
||||
local checksum="$6"
|
||||
local timestamp=$(date -Iseconds)
|
||||
|
||||
local timestamp
|
||||
timestamp=$(date -Iseconds)
|
||||
|
||||
if ! command -v jq > /dev/null 2>&1; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
# Use a lock file for parallel safety
|
||||
local lock_file="${JSON_LOG_FILE}.lock"
|
||||
local max_wait=30
|
||||
local wait_count=0
|
||||
|
||||
|
||||
while [ $wait_count -lt $max_wait ]; do
|
||||
if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
|
||||
break
|
||||
@@ -347,14 +317,15 @@ update_backup_log() {
|
||||
sleep 0.1
|
||||
((wait_count++))
|
||||
done
|
||||
|
||||
|
||||
if [ $wait_count -ge $max_wait ]; then
|
||||
log_warning "Could not acquire lock for JSON log update"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
# Create entry for this backup
|
||||
local entry=$(jq -n \
|
||||
local entry
|
||||
entry=$(jq -n \
|
||||
--arg service "$service" \
|
||||
--arg src "$src" \
|
||||
--arg dest "$dest" \
|
||||
@@ -371,7 +342,7 @@ update_backup_log() {
|
||||
checksum: $checksum,
|
||||
timestamp: $timestamp
|
||||
}')
|
||||
|
||||
|
||||
# Update JSON log safely
|
||||
if jq --argjson entry "$entry" --arg service "$service" \
|
||||
'.[$service] = $entry' "$JSON_LOG_FILE" > "${JSON_LOG_FILE}.tmp" 2>/dev/null; then
|
||||
@@ -379,7 +350,7 @@ update_backup_log() {
|
||||
else
|
||||
rm -f "${JSON_LOG_FILE}.tmp"
|
||||
fi
|
||||
|
||||
|
||||
# Remove lock file
|
||||
rm -f "$lock_file"
|
||||
}
|
||||
@@ -387,12 +358,12 @@ update_backup_log() {
|
||||
# Check if Docker container is running
|
||||
# Check whether a Docker container is currently running.
# Arguments: $1 - container name (matched literally and exactly)
# Returns:   0 if running; 1 otherwise (and logs a warning)
check_container_running() {
    local container="$1"

    # Plain '{{.Names}}' (no "table" prefix) avoids the NAMES header row.
    # grep -F -x matches the whole line as a fixed string, so names that
    # contain regex metacharacters (e.g. dots) cannot false-positive
    # against a different running container.
    if ! docker ps --format '{{.Names}}' | grep -Fxq -- "$container"; then
        log_warning "Container '$container' is not running"
        return 1
    fi

    return 0
}
|
||||
|
||||
@@ -401,18 +372,20 @@ verify_backup() {
|
||||
local src_container="$1"
|
||||
local src_path="$2"
|
||||
local dest_path="$3"
|
||||
|
||||
|
||||
if [ "$VERIFY_BACKUPS" != true ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
log_info "Verifying backup integrity for $src_container:$src_path"
|
||||
|
||||
|
||||
# For files, compare checksums
|
||||
if [[ "$src_path" == *.ini ]] || [[ "$src_path" == *.json ]]; then
|
||||
local src_checksum=$(docker exec "$src_container" md5sum "$src_path" 2>/dev/null | cut -d' ' -f1 || echo "")
|
||||
local dest_checksum=$(md5sum "$dest_path" 2>/dev/null | cut -d' ' -f1 || echo "")
|
||||
|
||||
local src_checksum
|
||||
local dest_checksum
|
||||
src_checksum=$(docker exec "$src_container" md5sum "$src_path" 2>/dev/null | cut -d' ' -f1 || echo "")
|
||||
dest_checksum=$(md5sum "$dest_path" 2>/dev/null | cut -d' ' -f1 || echo "")
|
||||
|
||||
if [ -n "$src_checksum" ] && [ -n "$dest_checksum" ] && [ "$src_checksum" == "$dest_checksum" ]; then
|
||||
log_success "Backup verification passed for $src_container:$src_path"
|
||||
return 0
|
||||
@@ -421,10 +394,11 @@ verify_backup() {
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# For directories, check if they exist and have content
|
||||
if [ -d "$dest_path" ]; then
|
||||
local file_count=$(find "$dest_path" -type f 2>/dev/null | wc -l)
|
||||
local file_count
|
||||
file_count=$(find "$dest_path" -type f 2>/dev/null | wc -l)
|
||||
if [ "$file_count" -gt 0 ]; then
|
||||
log_success "Backup verification passed for $src_container:$src_path ($file_count files)"
|
||||
return 0
|
||||
@@ -433,7 +407,7 @@ verify_backup() {
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
log_warning "Unable to verify backup for $src_container:$src_path"
|
||||
return 0
|
||||
}
|
||||
@@ -442,38 +416,39 @@ verify_backup() {
|
||||
backup_service() {
|
||||
local service="$1"
|
||||
local container="$1"
|
||||
local backup_start_time=$(date +%s)
|
||||
|
||||
local backup_start_time
|
||||
backup_start_time=$(date +%s)
|
||||
|
||||
log_message "Starting backup for service: $service"
|
||||
|
||||
|
||||
# Handle special cases for container names
|
||||
case "$service" in
|
||||
jellyseerr_db|jellyseerr_settings)
|
||||
container="jellyseerr"
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
# Check if container is running
|
||||
if ! check_container_running "$container"; then
|
||||
log_file_details "$service" "${container}:${MEDIA_SERVICES[$service]}" "${BACKUP_DESTINATIONS[$service]}" "FAILED - Container not running"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
local src_path="${MEDIA_SERVICES[$service]}"
|
||||
local dest_path="${BACKUP_DESTINATIONS[$service]}"
|
||||
|
||||
|
||||
# Create destination directory for jellyseerr
|
||||
if [[ "$service" == jellyseerr_* ]]; then
|
||||
mkdir -p "$(dirname "$dest_path")"
|
||||
fi
|
||||
|
||||
|
||||
# Perform the backup
|
||||
if [ "$DRY_RUN" == true ]; then
|
||||
log_info "DRY RUN: Would backup $container:$src_path to $dest_path"
|
||||
log_file_details "$service" "$container:$src_path" "$dest_path" "DRY RUN"
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
if [ "$INTERACTIVE_MODE" == true ]; then
|
||||
echo -n "Backup $service? (y/N): "
|
||||
read -r response
|
||||
@@ -482,14 +457,14 @@ backup_service() {
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Execute docker cp command
|
||||
local docker_cmd="docker cp $container:$src_path $dest_path"
|
||||
log_info "Executing: $docker_cmd"
|
||||
|
||||
|
||||
if $docker_cmd 2>&1 | tee -a "$LOG_FILE"; then
|
||||
log_success "Backup completed for $service"
|
||||
|
||||
|
||||
# Verify the backup
|
||||
if verify_backup "$container" "$src_path" "$dest_path"; then
|
||||
log_file_details "$service" "$container:$src_path" "$dest_path" "SUCCESS"
|
||||
@@ -510,7 +485,7 @@ backup_service() {
|
||||
backup_service_wrapper() {
|
||||
local service="$1"
|
||||
local temp_file="$2"
|
||||
|
||||
|
||||
if backup_service "$service"; then
|
||||
echo "SUCCESS:$service" >> "$temp_file"
|
||||
else
|
||||
@@ -521,45 +496,47 @@ backup_service_wrapper() {
|
||||
# Clean old backups based on age and count
|
||||
# Remove stale backups and log files.
# - Deletes per-service backup files older than MAX_BACKUP_AGE_DAYS.
# - Keeps only the newest MAX_BACKUPS_TO_KEEP .ini/.json backups per service.
# - Prunes dated backup_* directories (jellyseerr) the same way.
# Globals: BACKUP_ROOT, LOG_ROOT, MAX_BACKUP_AGE_DAYS, MAX_BACKUPS_TO_KEEP
cleanup_old_backups() {
    log_message "Cleaning up old backups..."

    for service_dir in "${BACKUP_ROOT}"/*; do
        if [ ! -d "$service_dir" ]; then
            continue
        fi

        local service
        service=$(basename "$service_dir")
        log_info "Cleaning up old backups for $service"

        # Remove backups older than MAX_BACKUP_AGE_DAYS
        find "$service_dir" -type f -mtime "+${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true
        find "$service_dir" -type d -empty -mtime "+${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true

        # Keep only the most recent MAX_BACKUPS_TO_KEEP backups.
        # The parentheses are required: without them '-type f' binds only to
        # the '*.ini' test, so non-regular files named *.json were matched.
        # 'xargs -r' skips running rm entirely when nothing matched.
        # NOTE(review): this pipeline assumes backup paths contain no
        # whitespace/newlines — true for the names this script generates.
        find "$service_dir" -type f \( -name "*.ini" -o -name "*.json" \) \
            | sort -r | tail -n +$((MAX_BACKUPS_TO_KEEP + 1)) \
            | xargs -r rm -f 2>/dev/null || true

        # Clean up old dated directories (for jellyseerr)
        find "$service_dir" -type d -name "backup_*" \
            | sort -r | tail -n +$((MAX_BACKUPS_TO_KEEP + 1)) \
            | xargs -r rm -rf 2>/dev/null || true
    done

    # Clean up old log files
    find "$LOG_ROOT" -name "media-backup-*.log" -mtime "+${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true
    find "$LOG_ROOT" -name "media-backup-*.md" -mtime "+${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true

    log_success "Cleanup completed"
}
|
||||
|
||||
# Check disk space
|
||||
# Verify that the filesystem holding BACKUP_ROOT has enough free space.
# Globals:   BACKUP_ROOT (path whose filesystem is checked)
# Returns:   0 if at least 1GB is available, 1 otherwise (logs an error)
check_disk_space() {
    local required_space_mb=1000 # Minimum 1GB free space

    local available_space_kb
    # -P: POSIX output format guarantees one line per filesystem, so a long
    #     device name cannot wrap and shift the "Available" column that
    #     'awk NR==2 {print $4}' relies on.
    # -k: report in 1024-byte blocks regardless of platform defaults.
    available_space_kb=$(df -Pk "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
    local available_space_mb=$((available_space_kb / 1024))

    if [ "$available_space_mb" -lt "$required_space_mb" ]; then
        log_error "Insufficient disk space. Available: ${available_space_mb}MB, Required: ${required_space_mb}MB"
        return 1
    fi

    log_info "Disk space check passed. Available: ${available_space_mb}MB"
    return 0
}
|
||||
@@ -569,14 +546,15 @@ send_notification() {
|
||||
local title="$1"
|
||||
local message="$2"
|
||||
local status="${3:-info}"
|
||||
local hostname=$(hostname)
|
||||
local hostname
|
||||
hostname=$(hostname)
|
||||
local total_services=${#MEDIA_SERVICES[@]}
|
||||
local success_count="$4"
|
||||
local failed_count="$5"
|
||||
|
||||
|
||||
# Enhanced message with statistics
|
||||
local enhanced_message="$message\n\nServices: $total_services\nSuccessful: $success_count\nFailed: $failed_count\nHost: $hostname"
|
||||
|
||||
|
||||
# Console notification
|
||||
case "$status" in
|
||||
"success") log_success "$title: $enhanced_message" ;;
|
||||
@@ -584,12 +562,12 @@ send_notification() {
|
||||
"warning") log_warning "$title: $enhanced_message" ;;
|
||||
*) log_info "$title: $enhanced_message" ;;
|
||||
esac
|
||||
|
||||
|
||||
# Webhook notification
|
||||
if [ -n "$WEBHOOK_URL" ] && [ "$DRY_RUN" != true ]; then
|
||||
local tags="backup,media,${hostname}"
|
||||
[ "$failed_count" -gt 0 ] && tags="${tags},errors"
|
||||
|
||||
|
||||
curl -s \
|
||||
-H "tags:${tags}" \
|
||||
-d "$enhanced_message" \
|
||||
@@ -602,7 +580,7 @@ generate_summary_report() {
|
||||
local success_count="$1"
|
||||
local failed_count="$2"
|
||||
local total_time="$3"
|
||||
|
||||
|
||||
log_message "=== BACKUP SUMMARY REPORT ==="
|
||||
log_message "Total Services: ${#MEDIA_SERVICES[@]}"
|
||||
log_message "Successful Backups: $success_count"
|
||||
@@ -610,11 +588,11 @@ generate_summary_report() {
|
||||
log_message "Total Time: ${total_time}s"
|
||||
log_message "Log File: $LOG_FILE"
|
||||
log_message "Markdown Report: $MARKDOWN_LOG"
|
||||
|
||||
|
||||
if [ "$PERFORMANCE_MONITORING" == true ]; then
|
||||
log_message "Performance Log: $PERFORMANCE_LOG_FILE"
|
||||
fi
|
||||
|
||||
|
||||
# Add summary to markdown log
|
||||
{
|
||||
echo "# Media Backup Summary Report"
|
||||
@@ -630,18 +608,19 @@ generate_summary_report() {
|
||||
|
||||
# Main backup execution function
|
||||
main() {
|
||||
local script_start_time=$(date +%s)
|
||||
|
||||
local script_start_time
|
||||
script_start_time=$(date +%s)
|
||||
|
||||
log_message "=== MEDIA SERVICES BACKUP STARTED ==="
|
||||
log_message "Host: $(hostname)"
|
||||
log_message "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
|
||||
log_message "Dry Run: $DRY_RUN"
|
||||
log_message "Parallel Mode: $PARALLEL_BACKUPS"
|
||||
log_message "Verify Backups: $VERIFY_BACKUPS"
|
||||
|
||||
|
||||
# Initialize logging
|
||||
initialize_json_log
|
||||
|
||||
|
||||
# Initialize markdown log
|
||||
{
|
||||
echo "# Media Services Backup Report"
|
||||
@@ -649,44 +628,45 @@ main() {
|
||||
echo "**Host**: $(hostname)"
|
||||
echo ""
|
||||
} > "$MARKDOWN_LOG"
|
||||
|
||||
|
||||
# Pre-flight checks
|
||||
if ! check_disk_space; then
|
||||
send_notification "Media Backup Failed" "Insufficient disk space" "error" 0 1
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Check if Docker is running
|
||||
if ! docker info >/dev/null 2>&1; then
|
||||
log_error "Docker is not running or accessible"
|
||||
send_notification "Media Backup Failed" "Docker is not accessible" "error" 0 1
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
local success_count=0
|
||||
local failed_count=0
|
||||
local backup_results=()
|
||||
|
||||
|
||||
if [ "$PARALLEL_BACKUPS" == true ]; then
|
||||
log_message "Running backups in parallel mode"
|
||||
|
||||
|
||||
# Create temporary file for collecting results
|
||||
local temp_results=$(mktemp)
|
||||
local temp_results
|
||||
temp_results=$(mktemp)
|
||||
local pids=()
|
||||
|
||||
|
||||
# Start backup jobs in parallel
|
||||
for service in "${!MEDIA_SERVICES[@]}"; do
|
||||
backup_service_wrapper "$service" "$temp_results" &
|
||||
pids+=($!)
|
||||
log_info "Started backup job for $service (PID: $!)"
|
||||
done
|
||||
|
||||
|
||||
# Wait for all jobs to complete
|
||||
for pid in "${pids[@]}"; do
|
||||
wait "$pid"
|
||||
log_info "Backup job completed (PID: $pid)"
|
||||
done
|
||||
|
||||
|
||||
# Collect results
|
||||
while IFS= read -r result; do
|
||||
if [[ "$result" == SUCCESS:* ]]; then
|
||||
@@ -697,12 +677,12 @@ main() {
|
||||
backup_results+=("✗ ${result#FAILED:}")
|
||||
fi
|
||||
done < "$temp_results"
|
||||
|
||||
|
||||
rm -f "$temp_results"
|
||||
|
||||
|
||||
else
|
||||
log_message "Running backups in sequential mode"
|
||||
|
||||
|
||||
# Run backups sequentially
|
||||
for service in "${!MEDIA_SERVICES[@]}"; do
|
||||
if backup_service "$service"; then
|
||||
@@ -714,22 +694,23 @@ main() {
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
|
||||
# Calculate total time
|
||||
local script_end_time=$(date +%s)
|
||||
local script_end_time
|
||||
script_end_time=$(date +%s)
|
||||
local total_time=$((script_end_time - script_start_time))
|
||||
|
||||
|
||||
# Track overall performance
|
||||
track_performance "full_media_backup" "$script_start_time" "$script_end_time"
|
||||
|
||||
|
||||
# Clean up old backups (only if not dry run)
|
||||
if [ "$DRY_RUN" != true ]; then
|
||||
cleanup_old_backups
|
||||
fi
|
||||
|
||||
|
||||
# Generate summary report
|
||||
generate_summary_report "$success_count" "$failed_count" "$total_time"
|
||||
|
||||
|
||||
# Add results to markdown log
|
||||
{
|
||||
echo "## Backup Results"
|
||||
@@ -740,28 +721,28 @@ main() {
|
||||
echo "**Completed**: $(date '+%Y-%m-%d %H:%M:%S')"
|
||||
echo "**Duration**: ${total_time}s"
|
||||
} >> "$MARKDOWN_LOG"
|
||||
|
||||
|
||||
# Send notification
|
||||
local status="success"
|
||||
local message="Media backup completed"
|
||||
|
||||
|
||||
if [ "$failed_count" -gt 0 ]; then
|
||||
status="warning"
|
||||
message="Media backup completed with $failed_count failures"
|
||||
fi
|
||||
|
||||
|
||||
if [ "$DRY_RUN" == true ]; then
|
||||
message="Media backup dry run completed"
|
||||
status="info"
|
||||
fi
|
||||
|
||||
|
||||
send_notification "Media Backup Complete" "$message" "$status" "$success_count" "$failed_count"
|
||||
|
||||
|
||||
# Exit with error code if any backups failed
|
||||
if [ "$failed_count" -gt 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
log_success "All media backups completed successfully!"
|
||||
exit 0
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user