Mirror of https://github.com/acedanger/shell.git (synced 2025-12-06 01:10:12 -08:00)
feat: Add comprehensive Plex recovery validation script
- Introduced `validate-plex-recovery.sh` for validating Plex database recovery.
- Implemented checks for service status, database integrity, web interface accessibility, API functionality, and recent logs.
- Added detailed recovery summary and next steps for users.

fix: Improve Debian patching script for compatibility

- Enhanced `debian-patches.sh` to securely download and execute bootstrap scripts.
- Updated package mapping logic and ensured proper permissions for patched files.

fix: Update Docker test scripts for better permission handling

- Modified `run-docker-tests.sh` to set appropriate permissions on the logs directory.
- Ensured log files have correct permissions after test runs.

fix: Enhance setup scripts for secure installations

- Updated `setup.sh` to securely download and execute installation scripts for zoxide and nvm (see the first sketch below).
- Improved error handling for failed downloads.

fix: Refine startup script for log directory permissions

- Adjusted `startup.sh` to set proper permissions for log directories and files.

chore: Revamp update-containers.sh for better error handling and logging

- Rewrote `update-containers.sh` to include detailed logging and error handling.
- Added validation for Docker image names and improved overall script robustness (see the second sketch below).
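The "securely download and execute" changes replace the usual curl-pipe-to-bash pattern. A minimal sketch of that approach, assuming a placeholder URL and a pinned checksum (the names and values here are illustrative, not taken from these scripts):

#!/bin/bash
# Sketch only: download to a temp file, verify a pinned checksum, then run.
# INSTALL_URL and EXPECTED_SHA256 are hypothetical placeholders.
set -euo pipefail

INSTALL_URL="https://example.com/install.sh"
EXPECTED_SHA256="0000000000000000000000000000000000000000000000000000000000000000"

tmpfile=$(mktemp)
trap 'rm -f "$tmpfile"' EXIT

# -f makes curl fail on HTTP errors instead of saving an error page
if ! curl -fsSL "$INSTALL_URL" -o "$tmpfile"; then
    echo "Download failed: $INSTALL_URL" >&2
    exit 1
fi

# Refuse to execute anything that does not match the expected checksum
if ! echo "$EXPECTED_SHA256  $tmpfile" | sha256sum -c - >/dev/null 2>&1; then
    echo "Checksum mismatch, refusing to execute" >&2
    exit 1
fi

chmod 0700 "$tmpfile"
bash "$tmpfile"

Compared with piping curl straight into bash, this fails closed on truncated downloads and tampered content.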
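The update-containers.sh rewrite also mentions validating Docker image names before use. One plausible shape for such a check, assuming a simple pattern match (the script's actual rule may be stricter or looser):

#!/bin/bash
# Sketch only: accept references that look like [registry[:port]/]repo[:tag].
# The regex is an illustrative assumption, not the rule from update-containers.sh.
is_valid_image_name() {
    local image="$1"
    [[ "$image" =~ ^([a-z0-9.-]+(:[0-9]+)?/)?[a-z0-9._/-]+(:[A-Za-z0-9._-]+)?$ ]]
}

for image in "linuxserver/plex:latest" "ghcr.io/owner/app:1.2.3" "bad name!"; do
    if is_valid_image_name "$image"; then
        echo "ok:       $image"
    else
        echo "rejected: $image"
    fi
done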
@@ -1,5 +1,50 @@
#!/bin/bash

################################################################################
# Plex Backup Validation and Health Monitoring Script
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Comprehensive backup validation system that verifies archive
#              integrity, database health, and backup completeness with
#              automated repair capabilities and detailed reporting.
#
# Features:
#   - Archive integrity verification (checksum validation)
#   - Database integrity checking within backups
#   - Backup completeness validation
#   - Automated repair suggestions and fixes
#   - Historical backup analysis
#   - Performance metrics and reporting
#   - Email and webhook notifications
#
# Related Scripts:
#   - backup-plex.sh: Creates backups validated by this script
#   - restore-plex.sh: Uses validation results for safe restoration
#   - monitor-plex-backup.sh: Real-time system monitoring
#   - test-plex-backup.sh: Automated testing framework
#   - plex.sh: General Plex service management
#
# Usage:
#   ./validate-plex-backups.sh            # Validate all backups
#   ./validate-plex-backups.sh --fix      # Validate and fix issues
#   ./validate-plex-backups.sh --report   # Generate detailed report
#   ./validate-plex-backups.sh --latest   # Validate only latest backup
#
# Dependencies:
#   - tar (for archive extraction and validation)
#   - sqlite3 or Plex SQLite (for database validation)
#   - jq (for JSON processing)
#   - curl (for webhook notifications)
#
# Exit Codes:
#   0 - Success, all backups valid
#   1 - General error
#   2 - Backup validation failures found
#   3 - Critical system issues
#
################################################################################

# Plex Backup Validation and Monitoring Script
# Usage: ./validate-plex-backups.sh [--fix] [--report]

@@ -34,10 +79,10 @@ declare -A OPTIONAL_FILES=(
log_message() {
    local message="$1"
    local clean_message="$2"

    # Display colored message to terminal
    echo -e "$(date '+%H:%M:%S') $message"

    # Strip ANSI codes and log clean version to file
    if [ -n "$clean_message" ]; then
        echo "$(date '+%H:%M:%S') $clean_message" >> "$REPORT_FILE"
@@ -67,28 +112,28 @@ log_info() {
sync_logs_to_shared() {
    local sync_start_time=$(date +%s)
    log_info "Starting log synchronization to shared location"

    # Ensure shared log directory exists
    if ! mkdir -p "$SHARED_LOG_ROOT" 2>/dev/null; then
        log_warning "Could not create shared log directory: $SHARED_LOG_ROOT"
        return 1
    fi

    # Check if shared location is accessible
    if [ ! -w "$SHARED_LOG_ROOT" ]; then
        log_warning "Shared log directory is not writable: $SHARED_LOG_ROOT"
        return 1
    fi

    # Sync log files (one-way: local -> shared)
    local sync_count=0
    local error_count=0

    for log_file in "$LOCAL_LOG_ROOT"/*.log; do
        if [ -f "$log_file" ]; then
            local filename=$(basename "$log_file")
            local shared_file="$SHARED_LOG_ROOT/$filename"

            # Only copy if file doesn't exist in shared location or local is newer
            if [ ! -f "$shared_file" ] || [ "$log_file" -nt "$shared_file" ]; then
                if cp "$log_file" "$shared_file" 2>/dev/null; then
@@ -101,16 +146,16 @@ sync_logs_to_shared() {
            fi
        fi
    done

    local sync_end_time=$(date +%s)
    local sync_duration=$((sync_end_time - sync_start_time))

    if [ $error_count -eq 0 ]; then
        log_success "Log sync completed: $sync_count files synced in ${sync_duration}s"
    else
        log_warning "Log sync completed with errors: $sync_count synced, $error_count failed in ${sync_duration}s"
    fi

    return $error_count
}

@@ -118,15 +163,15 @@ sync_logs_to_shared() {
cleanup_old_local_logs() {
    local cleanup_start_time=$(date +%s)
    log_info "Starting cleanup of old local logs (30+ days)"

    if [ ! -d "$LOCAL_LOG_ROOT" ]; then
        log_info "Local log directory does not exist, nothing to clean up"
        return 0
    fi

    local cleanup_count=0
    local error_count=0

    # Find and remove log files older than 30 days
    while IFS= read -r -d '' old_file; do
        local filename=$(basename "$old_file")
@@ -138,66 +183,66 @@ cleanup_old_local_logs() {
            log_warning "Failed to remove old log: $filename"
        fi
    done < <(find "$LOCAL_LOG_ROOT" -name "*.log" -mtime +30 -print0 2>/dev/null)

    local cleanup_end_time=$(date +%s)
    local cleanup_duration=$((cleanup_end_time - cleanup_start_time))

    if [ $cleanup_count -gt 0 ]; then
        log_success "Cleanup completed: $cleanup_count items removed in ${cleanup_duration}s"
    else
        log_info "Cleanup completed: no old items found to remove in ${cleanup_duration}s"
    fi

    return $error_count
}

# Check dependencies
check_dependencies() {
    local missing_deps=()

    # Check for required commands
    if ! command -v tar >/dev/null 2>&1; then
        missing_deps+=("tar")
    fi

    if ! command -v find >/dev/null 2>&1; then
        missing_deps+=("find")
    fi

    if ! command -v df >/dev/null 2>&1; then
        missing_deps+=("df")
    fi

    if ! command -v du >/dev/null 2>&1; then
        missing_deps+=("du")
    fi

    if [ ${#missing_deps[@]} -gt 0 ]; then
        log_error "Missing required dependencies: ${missing_deps[*]}"
        log_info "Please install missing dependencies before running this script"
        return 1
    fi

    return 0
}

# Check backup directory structure
validate_backup_structure() {
    log_info "Validating backup directory structure..."

    if [ ! -d "$BACKUP_ROOT" ]; then
        log_error "Backup root directory not found: $BACKUP_ROOT"
        return 1
    fi

    local backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
    log_info "Found $backup_count backup files"

    if [ "$backup_count" -eq 0 ]; then
        log_warning "No backup files found"
        return 1
    fi

    return 0
}

@@ -206,35 +251,35 @@ validate_backup() {
    local backup_file="$1"
    local backup_name=$(basename "$backup_file")
    local errors=0

    log_info "Validating backup: $backup_name"

    # Check if file exists and is readable
    if [ ! -f "$backup_file" ] || [ ! -r "$backup_file" ]; then
        log_error "Backup file not accessible: $backup_file"
        return 1
    fi

    # Test archive integrity
    if ! tar -tzf "$backup_file" >/dev/null 2>&1; then
        log_error "Archive integrity check failed: $backup_name"
        errors=$((errors + 1))
    else
        log_success "Archive integrity check passed: $backup_name"

        # Check for expected files in archive
        local archive_contents=$(tar -tzf "$backup_file" 2>/dev/null)

        # Check if this is a legacy backup with dated subdirectory
        local has_dated_subdir=false
        if echo "$archive_contents" | grep -q "^\./[0-9]\{8\}/" || echo "$archive_contents" | grep -q "^[0-9]\{8\}/"; then
            has_dated_subdir=true
            log_info " Detected legacy backup format with dated subdirectory"
        fi

        for file in "${EXPECTED_FILES[@]}"; do
            local file_found=false

            if [ "$has_dated_subdir" = true ]; then
                # For legacy backups, look for files in dated subdirectory (with or without timestamps)
                if echo "$archive_contents" | grep -q "^\./[0-9]\{8\}/$file" || \
@@ -250,14 +295,14 @@ validate_backup() {
                    file_found=true
                fi
            fi

            if [ "$file_found" = true ]; then
                log_success " Found: $file"
            else
                # Check if this is an optional file that might not exist in older backups
                local backup_name=$(basename "$backup_file")
                local backup_datetime=$(echo "$backup_name" | sed 's/plex-backup-\([0-9]\{8\}_[0-9]\{6\}\)\.tar\.gz/\1/')

                if [[ -n "${OPTIONAL_FILES[$file]}" ]] && [[ "$backup_datetime" < "${OPTIONAL_FILES[$file]}" ]]; then
                    log_warning " Missing file (expected for backup date): $file"
                    log_info " Note: $file was introduced around ${OPTIONAL_FILES[$file]}, this backup is from $backup_datetime"
@@ -267,7 +312,7 @@ validate_backup() {
                fi
            fi
        done

        # Check for unexpected files (more lenient for legacy backups)
        local unexpected_files=()
        while IFS= read -r line; do
@@ -275,7 +320,7 @@ validate_backup() {
            if [[ "$line" == "./" ]] || [[ "$line" == */ ]] || [[ -z "$line" ]]; then
                continue
            fi

            # Extract filename from path (handle both legacy and new formats)
            local filename=""
            if [[ "$line" =~ ^\./[0-9]{8}/(.+)$ ]] || [[ "$line" =~ ^[0-9]{8}/(.+)$ ]]; then
@@ -290,7 +335,7 @@ validate_backup() {
                # Direct filename
                filename="$line"
            fi

            # Check if this is an expected file
            local is_expected=false
            for expected_file in "${EXPECTED_FILES[@]}"; do
@@ -299,12 +344,12 @@ validate_backup() {
                    break
                fi
            done

            if [ "$is_expected" = false ]; then
                unexpected_files+=("$line")
            fi
        done <<< "$archive_contents"

        # Report unexpected files if any found
        if [ ${#unexpected_files[@]} -gt 0 ]; then
            for unexpected_file in "${unexpected_files[@]}"; do
@@ -312,44 +357,44 @@ validate_backup() {
            done
        fi
    fi

    return $errors
}

# Check backup freshness
check_backup_freshness() {
    log_info "Checking backup freshness..."

    local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)

    if [ -z "$latest_backup" ]; then
        log_error "No backups found"
        return 1
    fi

    local backup_filename=$(basename "$latest_backup")
    # Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
    local backup_datetime=$(echo "$backup_filename" | sed 's/plex-backup-\([0-9]\{8\}_[0-9]\{6\}\)\.tar\.gz/\1/')

    # Validate that we extracted a valid datetime
    if [[ ! "$backup_datetime" =~ ^[0-9]{8}_[0-9]{6}$ ]]; then
        log_error "Could not parse backup date from filename: $backup_filename"
        return 1
    fi

    local backup_date="${backup_datetime%_*}"  # Remove time part

    # Validate date format and convert to timestamp
    if ! backup_timestamp=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s 2>/dev/null); then
        log_error "Invalid backup date format: $backup_date"
        return 1
    fi

    local current_timestamp=$(date +%s)
    local age_days=$(( (current_timestamp - backup_timestamp) / 86400 ))

    log_info "Latest backup: $backup_datetime ($age_days days old)"

    if [ "$age_days" -gt 7 ]; then
        log_warning "Latest backup is older than 7 days"
        return 1
@@ -358,7 +403,7 @@ check_backup_freshness() {
    else
        log_success "Latest backup is recent"
    fi

    return 0
}

@@ -373,11 +418,11 @@ validate_json_log() {
# Check backup file sizes for anomalies
check_backup_sizes() {
    log_info "Checking backup file sizes..."

    local backup_files=()
    local backup_sizes=()
    local total_size=0

    # Collect backup files and their sizes
    while IFS= read -r backup_file; do
        if [ -f "$backup_file" ] && [ -r "$backup_file" ]; then
@@ -387,32 +432,32 @@ check_backup_sizes() {
            total_size=$((total_size + size))
        fi
    done < <(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort)

    if [ ${#backup_files[@]} -eq 0 ]; then
        log_warning "No backup files found for size analysis"
        return 1
    fi

    # Calculate average size
    local avg_size=$((total_size / ${#backup_files[@]}))
    local human_total=$(numfmt --to=iec "$total_size" 2>/dev/null || echo "${total_size} bytes")
    local human_avg=$(numfmt --to=iec "$avg_size" 2>/dev/null || echo "${avg_size} bytes")

    log_info "Total backup size: $human_total"
    log_info "Average backup size: $human_avg"

    # Check for suspiciously small backups (less than 50% of average)
    local min_size=$((avg_size / 2))
    local suspicious_count=0

    for i in "${!backup_files[@]}"; do
        local file="${backup_files[$i]}"
        local size="${backup_sizes[$i]}"
        local filename=$(basename "$file")

        if [ "$size" -lt "$min_size" ] && [ "$size" -gt 0 ]; then
            local human_size=$(numfmt --to=iec "$size" 2>/dev/null || echo "${size} bytes")

            # Extract backup datetime to check if it's a pre-blobs backup
            local backup_datetime=$(echo "$filename" | sed 's/plex-backup-\([0-9]\{8\}_[0-9]\{6\}\)\.tar\.gz/\1/')
            if [[ "$backup_datetime" =~ ^[0-9]{8}_[0-9]{6}$ ]] && [[ "$backup_datetime" < "20250526_144500" ]]; then
@@ -424,29 +469,29 @@ check_backup_sizes() {
            fi
        fi
    done

    if [ "$suspicious_count" -gt 0 ]; then
        log_warning "Found $suspicious_count backup(s) that may be incomplete"
        return 1
    else
        log_success "All backup sizes appear normal"
    fi

    return 0
}

# Check disk space
check_disk_space() {
    log_info "Checking disk space..."

    local backup_disk_usage=$(du -sh "$BACKUP_ROOT" | cut -f1)
    local available_space=$(df -h "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
    local used_percentage=$(df "$BACKUP_ROOT" | awk 'NR==2 {print $5}' | sed 's/%//')

    log_info "Backup disk usage: $backup_disk_usage"
    log_info "Available space: $available_space"
    log_info "Disk usage: $used_percentage%"

    if [ "$used_percentage" -gt 90 ]; then
        log_error "Disk usage is above 90%"
        return 1
@@ -455,55 +500,55 @@ check_disk_space() {
    else
        log_success "Disk usage is acceptable"
    fi

    return 0
}

# Generate backup report
generate_report() {
    log_info "Generating backup report..."

    local total_backups=0
    local valid_backups=0
    local total_errors=0

    # Header
    echo "==================================" >> "$REPORT_FILE"
    echo "Plex Backup Validation Report" >> "$REPORT_FILE"
    echo "Generated: $(date)" >> "$REPORT_FILE"
    echo "==================================" >> "$REPORT_FILE"

    # Use process substitution to avoid subshell variable scope issues
    while IFS= read -r backup_file; do
        total_backups=$((total_backups + 1))
        validate_backup "$backup_file"
        local backup_errors=$?

        if [ "$backup_errors" -eq 0 ]; then
            valid_backups=$((valid_backups + 1))
        else
            total_errors=$((total_errors + backup_errors))
        fi
    done < <(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort)

    # Summary
    echo >> "$REPORT_FILE"
    echo "Summary:" >> "$REPORT_FILE"
    echo "  Total backups: $total_backups" >> "$REPORT_FILE"
    echo "  Valid backups: $valid_backups" >> "$REPORT_FILE"
    echo "  Total errors: $total_errors" >> "$REPORT_FILE"

    log_success "Report generated: $REPORT_FILE"
}

# Fix common issues
fix_issues() {
    log_info "Attempting to fix common issues..."

    # Create corrupted backups directory
    local corrupted_dir="$(dirname "$REPORT_FILE")/corrupted-backups"
    mkdir -p "$corrupted_dir"

    # Check for and move corrupted backup files using process substitution
    local corrupted_count=0
    while IFS= read -r backup_file; do
@@ -511,7 +556,7 @@ fix_issues() {
            log_warning "Found corrupted backup: $(basename "$backup_file")"
            local backup_name=$(basename "$backup_file")
            local corrupted_backup="$corrupted_dir/$backup_name"

            if mv "$backup_file" "$corrupted_backup"; then
                log_success "Moved corrupted backup to: $corrupted_backup"
                corrupted_count=$((corrupted_count + 1))
@@ -520,14 +565,14 @@ fix_issues() {
            fi
        fi
    done < <(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null || true)

    if [ "$corrupted_count" -gt 0 ]; then
        log_info "Moved $corrupted_count corrupted backup(s) to $corrupted_dir"
    fi

    # Clean up any remaining dated directories from old backup structure
    find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" -exec rm -rf {} \; 2>/dev/null || true

    # Fix permissions if needed
    if [ -d "$BACKUP_ROOT" ]; then
        chmod 755 "$BACKUP_ROOT" 2>/dev/null || log_warning "Could not fix backup root permissions"
@@ -541,7 +586,7 @@ main() {
    local fix_mode=false
    local report_mode=false
    local verbose_mode=false

    # Parse arguments
    while [[ $# -gt 0 ]]; do
        case $1 in
@@ -578,31 +623,31 @@ main() {
                ;;
        esac
    done

    log_info "Starting Plex backup validation..."

    # Check dependencies first
    if ! check_dependencies; then
        exit 1
    fi

    # Create logs directory if needed
    mkdir -p "$(dirname "$REPORT_FILE")"

    local overall_status=0
    local critical_errors=0
    local warnings=0

    # Fix issues if requested
    if [ "$fix_mode" = true ]; then
        fix_issues
    fi

    # Validate backup structure
    if ! validate_backup_structure; then
        critical_errors=$((critical_errors + 1))
    fi

    # Check backup freshness
    if ! check_backup_freshness; then
        local freshness_result=$?
@@ -616,29 +661,29 @@ main() {
            warnings=$((warnings + 1))
        fi
    fi

    # Validate JSON log
    if ! validate_json_log; then
        critical_errors=$((critical_errors + 1))
    fi

    # Check disk space
    if ! check_disk_space; then
        warnings=$((warnings + 1))
    fi

    # Check backup file sizes
    if [ "$verbose_mode" = true ] || [ "$report_mode" = true ]; then
        if ! check_backup_sizes; then
            warnings=$((warnings + 1))
        fi
    fi

    # Generate detailed report if requested
    if [ "$report_mode" = true ]; then
        generate_report
    fi

    # Final summary
    echo
    if [ "$critical_errors" -eq 0 ] && [ "$warnings" -eq 0 ]; then
@@ -655,12 +700,12 @@ main() {
        echo "Consider running with --fix to attempt automatic repairs"
        echo "Use --report for a detailed backup analysis"
    fi

    # Sync logs to shared location and cleanup old local logs
    log_info "Post-validation: synchronizing logs and cleaning up old files"
    sync_logs_to_shared
    cleanup_old_local_logs

    exit $overall_status
}
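Since the script header documents distinct exit codes (0 = all backups valid, 1 = general error, 2 = validation failures, 3 = critical system issues), callers can branch on them. A small wrapper sketch, assuming a hypothetical /opt/scripts install path (the logger tags are placeholders too):

#!/bin/bash
# Sketch only: run the validator and react to its documented exit codes.
/opt/scripts/validate-plex-backups.sh --report
case $? in
    0) logger -t plex-backup "All backups valid" ;;
    2) logger -t plex-backup "Validation failures found; consider --fix" ;;
    3) logger -t plex-backup -p user.err "Critical backup system issue" ;;
    *) logger -t plex-backup "Validator exited with a general error" ;;
esac

Run daily from cron, this keeps a syslog trail of backup health without parsing the report file.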