Mirror of https://github.com/acedanger/shell.git (synced 2025-12-06 00:00:13 -08:00)
Refactor variable assignments and improve script readability in validate-plex-backups.sh and validate-plex-recovery.sh
- Changed inline variable assignments to separate declaration and assignment for clarity.
- Updated condition checks and log messages for better readability and consistency.
- Added a backup of validate-plex-recovery.sh for safety.
- Introduced a new script, run-docker-tests.sh, for testing setup in Docker containers.
- Enhanced ssh-login.sh to improve condition checks and logging functionality.
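Most of the diff below is one mechanical change, flagged by ShellCheck as SC2155: `local var=$(cmd)` returns the exit status of `local` itself (always 0), so a failing command substitution goes unnoticed. Splitting the declaration from the assignment preserves the command's status. A minimal, self-contained sketch of the difference (the function and variable names are illustrative, not taken from these scripts):

#!/usr/bin/env bash

masked() {
    # SC2155: the substitution fails, but `local` exits 0,
    # so the failure is invisible to the caller.
    local value=$(false)
    echo "combined form: \$? = $?"    # prints 0
}

split() {
    # Declaration first, assignment second: $? now reflects
    # the command substitution itself.
    local value
    value=$(false)
    echo "split form: \$? = $?"       # prints 1
}

masked
split

The same commit also quotes variables inside `[ ... ]` tests (e.g. `[ "$count" -eq 0 ]` in format_backed_up_files), which keeps the test from breaking with a syntax error if the variable is ever empty.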
@@ -57,12 +57,7 @@ BLUE='\033[0;34m'
 CYAN='\033[0;36m'
 NC='\033[0m' # No Color
 
-# Performance tracking variables
-SCRIPT_START_TIME=$(date +%s)
-BACKUP_START_TIME=""
-VERIFICATION_START_TIME=""
-SERVICE_STOP_TIME=""
-SERVICE_START_TIME=""
+# Performance tracking variables (removed unused variables)
 
 # Configuration
 MAX_BACKUP_AGE_DAYS=30
@@ -168,7 +163,8 @@ declare -A PLEX_FILES=(
 # Logging functions
 log_message() {
     local message="$1"
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
     echo -e "${CYAN}[${timestamp}]${NC} ${message}"
     mkdir -p "$LOCAL_LOG_ROOT"
     echo "[${timestamp}] $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
@@ -176,7 +172,8 @@ log_message() {
 
 log_error() {
     local message="$1"
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
     echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}"
     mkdir -p "$LOCAL_LOG_ROOT"
     echo "[${timestamp}] ERROR: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
@@ -184,7 +181,8 @@ log_error() {
 
 log_success() {
     local message="$1"
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
     echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}"
     mkdir -p "$LOCAL_LOG_ROOT"
     echo "[${timestamp}] SUCCESS: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
@@ -192,7 +190,8 @@ log_success() {
 
 log_warning() {
     local message="$1"
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
     echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}"
     mkdir -p "$LOCAL_LOG_ROOT"
     echo "[${timestamp}] WARNING: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
@@ -200,7 +199,8 @@ log_warning() {
 
 log_info() {
     local message="$1"
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
     echo -e "${BLUE}[${timestamp}] INFO:${NC} ${message}"
     mkdir -p "$LOCAL_LOG_ROOT"
     echo "[${timestamp}] INFO: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true
@@ -224,7 +224,8 @@ track_performance() {
     fi
 
     # Add performance entry
-    local entry=$(jq -n \
+    local entry
+    entry=$(jq -n \
         --arg operation "$operation" \
         --arg duration "$duration" \
         --arg timestamp "$(date -Iseconds)" \
@@ -251,7 +252,8 @@ initialize_logs() {
 
 # Log synchronization functions
 sync_logs_to_shared() {
-    local sync_start_time=$(date +%s)
+    local sync_start_time
+    sync_start_time=$(date +%s)
     log_info "Starting log synchronization to shared location"
 
     # Ensure shared log directory exists
@@ -272,7 +274,8 @@ sync_logs_to_shared() {
 
     for log_file in "$LOCAL_LOG_ROOT"/*.log "$LOCAL_LOG_ROOT"/*.json; do
         if [ -f "$log_file" ]; then
-            local filename=$(basename "$log_file")
+            local filename
+            filename=$(basename "$log_file")
             local shared_file="$SHARED_LOG_ROOT/$filename"
 
             # Only copy if file doesn't exist in shared location or local is newer
@@ -288,7 +291,8 @@ sync_logs_to_shared() {
         fi
     done
 
-    local sync_end_time=$(date +%s)
+    local sync_end_time
+    sync_end_time=$(date +%s)
     local sync_duration=$((sync_end_time - sync_start_time))
 
     if [ $error_count -eq 0 ]; then
@@ -302,7 +306,8 @@ sync_logs_to_shared() {
 
 # Cleanup old local logs (30 day retention)
 cleanup_old_local_logs() {
-    local cleanup_start_time=$(date +%s)
+    local cleanup_start_time
+    cleanup_start_time=$(date +%s)
     log_info "Starting cleanup of old local logs (30+ days)"
 
     if [ ! -d "$LOCAL_LOG_ROOT" ]; then
@@ -315,7 +320,8 @@ cleanup_old_local_logs() {
 
     # Find and remove log files older than 30 days
    while IFS= read -r -d '' old_file; do
-        local filename=$(basename "$old_file")
+        local filename
+        filename=$(basename "$old_file")
         if rm "$old_file" 2>/dev/null; then
             ((cleanup_count++))
             log_info "Removed old log: $filename"
@@ -327,12 +333,15 @@ cleanup_old_local_logs() {
 
     # Also clean up old performance log entries (keep structure, remove old entries)
     if [ -f "$PERFORMANCE_LOG_FILE" ]; then
-        local thirty_days_ago=$(date -d '30 days ago' -Iseconds)
+        local thirty_days_ago
+        thirty_days_ago=$(date -d '30 days ago' -Iseconds)
         local temp_perf_file="${PERFORMANCE_LOG_FILE}.cleanup.tmp"
 
         if jq --arg cutoff "$thirty_days_ago" '[.[] | select(.timestamp >= $cutoff)]' "$PERFORMANCE_LOG_FILE" > "$temp_perf_file" 2>/dev/null; then
-            local old_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
-            local new_count=$(jq length "$temp_perf_file" 2>/dev/null || echo "0")
+            local old_count
+            old_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+            local new_count
+            new_count=$(jq length "$temp_perf_file" 2>/dev/null || echo "0")
             local removed_count=$((old_count - new_count))
 
             if [ "$removed_count" -gt 0 ]; then
@@ -349,7 +358,8 @@ cleanup_old_local_logs() {
         fi
     fi
 
-    local cleanup_end_time=$(date +%s)
+    local cleanup_end_time
+    cleanup_end_time=$(date +%s)
     local cleanup_duration=$((cleanup_end_time - cleanup_start_time))
 
     if [ $cleanup_count -gt 0 ]; then
@@ -366,7 +376,8 @@ send_notification() {
     local title="$1"
     local message="$2"
     local status="${3:-info}" # success, error, warning, info
-    local hostname=$(hostname)
+    local hostname
+    hostname=$(hostname)
 
     # Console notification
     case "$status" in
@@ -412,16 +423,17 @@ format_backed_up_files() {
     local files=("$@")
     local count=${#files[@]}
 
-    if [ $count -eq 0 ]; then
+    if [ "$count" -eq 0 ]; then
         echo "no files"
-    elif [ $count -eq 1 ]; then
+    elif [ "$count" -eq 1 ]; then
         echo "${files[0]}"
-    elif [ $count -eq 2 ]; then
+    elif [ "$count" -eq 2 ]; then
         echo "${files[0]} and ${files[1]}"
     else
         local last_file="${files[-1]}"
         local other_files=("${files[@]:0:$((count-1))}")
-        local other_files_str=$(IFS=', '; echo "${other_files[*]}")
+        local other_files_str
+        other_files_str=$(IFS=', '; echo "${other_files[*]}")
         echo "${other_files_str}, and ${last_file}"
     fi
 }
@@ -431,17 +443,20 @@ calculate_checksum() {
     local file="$1"
     # Use /tmp for cache files to avoid permission issues
     local cache_dir="/tmp/plex-backup-cache"
-    local cache_file="$cache_dir/$(echo "$file" | sed 's|/|_|g').md5"
-    local file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
+    local cache_file="$cache_dir/${file//\//_}.md5"
+    local file_mtime
+    file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
 
     # Create cache directory if it doesn't exist
     mkdir -p "$cache_dir" 2>/dev/null || true
 
     # Check if cached checksum exists and is newer than file
     if [ -f "$cache_file" ]; then
-        local cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
+        local cache_mtime
+        cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
         if [ "$cache_mtime" -gt "$file_mtime" ]; then
-            local cached_checksum=$(cat "$cache_file" 2>/dev/null)
+            local cached_checksum
+            cached_checksum=$(cat "$cache_file" 2>/dev/null)
             if [[ -n "$cached_checksum" && "$cached_checksum" =~ ^[a-f0-9]{32}$ ]]; then
                 echo "$cached_checksum"
                 return 0
@@ -480,7 +495,8 @@ calculate_checksum() {
 # Check database integrity using Plex SQLite
 check_database_integrity() {
     local db_file="$1"
-    local db_name=$(basename "$db_file")
+    local db_name
+    db_name=$(basename "$db_file")
 
     log_message "Checking database integrity: $db_name"
 
@@ -518,10 +534,13 @@ check_database_integrity() {
 # Advanced database repair using https://github.com/ChuckPa/DBRepair/ project methods
 repair_database() {
     local db_file="$1"
-    local db_name=$(basename "$db_file")
+    local db_name
+    db_name=$(basename "$db_file")
     local backup_file="${db_file}.pre-repair-backup"
-    local timestamp=$(date "+%Y-%m-%d_%H.%M.%S")
-    local db_dir=$(dirname "$db_file")
+    local timestamp
+    timestamp=$(date "+%Y-%m-%d_%H.%M.%S")
+    local db_dir
+    db_dir=$(dirname "$db_file")
     local temp_dir="${db_dir}/repair-temp-${timestamp}"
 
     log_message "Starting advanced database repair for: $db_name"
@@ -554,7 +573,7 @@ repair_database() {
     local new_db_file="${temp_dir}/${db_name}.new"
 
     log_message "Step 2: Dumping database to SQL..."
-    if sudo "$PLEX_SQLITE" "$db_file" ".dump" > "$dump_file" 2>/dev/null; then
+    if sudo "$PLEX_SQLITE" "$db_file" ".dump" | sudo tee "$dump_file" >/dev/null 2>&1; then
         log_success "Database dumped successfully"
 
         log_message "Step 3: Creating new database from dump..."
@@ -627,7 +646,8 @@ handle_wal_files() {
     )
 
     for wal_file in "${wal_files[@]}"; do
-        local wal_basename=$(basename "$wal_file")
+        local wal_basename
+        wal_basename=$(basename "$wal_file")
 
         case "$action" in
             "backup")
@@ -670,7 +690,8 @@ handle_wal_files() {
 # Enhanced database integrity check with WAL handling
 check_database_integrity_with_wal() {
     local db_file="$1"
-    local db_name=$(basename "$db_file")
+    local db_name
+    db_name=$(basename "$db_file")
 
     log_message "Checking database integrity with WAL handling: $db_name"
 
@@ -720,14 +741,16 @@ check_database_integrity_with_wal() {
 verify_files_parallel() {
     local backup_dir="$1"
     local -a pids=()
-    local temp_dir=$(mktemp -d)
+    local temp_dir
+    temp_dir=$(mktemp -d)
     local verification_errors=0
 
     if [ "$PARALLEL_VERIFICATION" != true ]; then
         # Fall back to sequential verification
         for nickname in "${!PLEX_FILES[@]}"; do
             local src_file="${PLEX_FILES[$nickname]}"
-            local dest_file="$backup_dir/$(basename "$src_file")"
+            local dest_file
+            dest_file="$backup_dir/$(basename "$src_file")"
 
             if [ -f "$dest_file" ]; then
                 if ! verify_backup "$src_file" "$dest_file"; then
@@ -743,7 +766,8 @@ verify_files_parallel() {
     # Start verification jobs in parallel
     for nickname in "${!PLEX_FILES[@]}"; do
         local src_file="${PLEX_FILES[$nickname]}"
-        local dest_file="$backup_dir/$(basename "$src_file")"
+        local dest_file
+        dest_file="$backup_dir/$(basename "$src_file")"
 
         if [ -f "$dest_file" ]; then
             (
@@ -767,7 +791,8 @@ verify_files_parallel() {
     for nickname in "${!PLEX_FILES[@]}"; do
         local result_file="$temp_dir/$nickname.result"
         if [ -f "$result_file" ]; then
-            local result=$(cat "$result_file")
+            local result
+            result=$(cat "$result_file")
             if [ "$result" != "0" ]; then
                 verification_errors=$((verification_errors + 1))
             fi
@@ -861,16 +886,13 @@ verify_backup() {
 # Enhanced service management with better monitoring
 manage_plex_service() {
     local action="$1"
-    local operation_start=$(date +%s)
+    local operation_start
+    operation_start=$(date +%s)
 
     log_message "Managing Plex service: $action"
 
     case "$action" in
         stop)
-            if [ "$action" == "stop" ]; then
-                SERVICE_STOP_TIME=$(date +%s)
-            fi
-
             if sudo systemctl stop plexmediaserver.service; then
                 log_success "Plex service stopped"
                 # Wait for clean shutdown with progress indicator
@@ -897,10 +919,6 @@ manage_plex_service() {
             fi
             ;;
         start)
-            if [ "$action" == "start" ]; then
-                SERVICE_START_TIME=$(date +%s)
-            fi
-
             if sudo systemctl start plexmediaserver.service; then
                 log_success "Plex service start command issued"
                 # Wait for service to be fully running with progress indicator
@@ -938,7 +956,8 @@ check_disk_space() {
     local backup_dir="$1"
     local required_space_mb="$2"
 
-    local available_space_kb=$(df "$backup_dir" | awk 'NR==2 {print $4}')
+    local available_space_kb
+    available_space_kb=$(df "$backup_dir" | awk 'NR==2 {print $4}')
     local available_space_mb=$((available_space_kb / 1024))
 
     if [ "$available_space_mb" -lt "$required_space_mb" ]; then
@@ -957,7 +976,8 @@ estimate_backup_size() {
     for nickname in "${!PLEX_FILES[@]}"; do
         local file="${PLEX_FILES[$nickname]}"
         if [ -f "$file" ]; then
-            local size_kb=$(du -k "$file" 2>/dev/null | cut -f1)
+            local size_kb
+            size_kb=$(du -k "$file" 2>/dev/null | cut -f1)
             total_size=$((total_size + size_kb))
         fi
     done
@@ -977,10 +997,14 @@ generate_performance_report() {
     jq -r '.[-10:] | .[] | " \(.operation): \(.duration_seconds)s (\(.timestamp))"' "$PERFORMANCE_LOG_FILE" 2>/dev/null || true
 
     # Calculate averages for common operations
-    local avg_backup=$(jq '[.[] | select(.operation == "backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
-    local avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
-    local avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
-    local avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+    local avg_backup
+    avg_backup=$(jq '[.[] | select(.operation == "backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+    local avg_verification
+    avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+    local avg_service_stop
+    avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+    local avg_service_start
+    avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
 
     if [ "$avg_backup" != "0" ]; then
         log_info "Average backup time: ${avg_backup}s"
@@ -1004,7 +1028,8 @@ cleanup_old_backups() {
     find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
 
     # Keep only MAX_BACKUPS_TO_KEEP most recent backups
-    local backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
+    local backup_count
+    backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
 
     if [ "$backup_count" -gt "$MAX_BACKUPS_TO_KEEP" ]; then
         local excess_count=$((backup_count - MAX_BACKUPS_TO_KEEP))
@@ -1103,7 +1128,8 @@ check_integrity_only() {
 
 # Main backup function
 main() {
-    local overall_start=$(date +%s)
+    local overall_start
+    overall_start=$(date +%s)
 
     log_message "Starting enhanced Plex backup process at $(date)"
     send_notification "Backup Started" "Plex backup process initiated" "info"
@@ -1118,11 +1144,13 @@ main() {
     # Check if only doing integrity check
     if [ "$INTEGRITY_CHECK_ONLY" = true ]; then
         check_integrity_only
+        # shellcheck disable=SC2317
         return $?
     fi
 
     # Estimate backup size
-    local estimated_size_mb=$(estimate_backup_size)
+    local estimated_size_mb
+    estimated_size_mb=$(estimate_backup_size)
     log_message "Estimated backup size: ${estimated_size_mb}MB"
 
     # Check disk space (require 2x estimated size for safety)
@@ -1217,7 +1245,8 @@ main() {
     handle_wal_files "backup" "$BACKUP_PATH"
 
     # Backup files - always perform full backup
-    local backup_start=$(date +%s)
+    local backup_start
+    backup_start=$(date +%s)
     for nickname in "${!PLEX_FILES[@]}"; do
         local file="${PLEX_FILES[$nickname]}"
 
@@ -1225,7 +1254,8 @@ main() {
         log_message "Backing up: $(basename "$file")"
 
         # Create backup filename without timestamp (use original filename)
-        local backup_file="${BACKUP_PATH}/$(basename "$file")"
+        local backup_file
+        backup_file="${BACKUP_PATH}/$(basename "$file")"
 
         # Copy file
         if sudo cp "$file" "$backup_file"; then
@@ -1269,14 +1299,17 @@ main() {
         log_error "Backup root directory is not writable: $BACKUP_ROOT"
         backup_errors=$((backup_errors + 1))
     else
-        local temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
-        local final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
+        local temp_archive
+        temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
+        local final_archive
+        final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
 
         log_info "Temporary archive: $temp_archive"
         log_info "Final archive: $final_archive"
 
         # Create archive in /tmp first, containing only the backed up files
-        local temp_dir="/tmp/plex-backup-staging-$(date '+%Y%m%d_%H%M%S')"
+        local temp_dir
+        temp_dir="/tmp/plex-backup-staging-$(date '+%Y%m%d_%H%M%S')"
         if ! mkdir -p "$temp_dir"; then
             log_error "Failed to create staging directory: $temp_dir"
             backup_errors=$((backup_errors + 1))
@@ -1287,7 +1320,8 @@ main() {
             local files_staged=0
             for nickname in "${!PLEX_FILES[@]}"; do
                 local file="${PLEX_FILES[$nickname]}"
-                local backup_file="${BACKUP_PATH}/$(basename "$file")"
+                local backup_file
+                backup_file="${BACKUP_PATH}/$(basename "$file")"
                 if [ -f "$backup_file" ]; then
                     if cp "$backup_file" "$temp_dir/"; then
                         files_staged=$((files_staged + 1))
@@ -1309,9 +1343,11 @@ main() {
             log_info "Staged $files_staged files for archive creation"
 
             # Check disk space in /tmp
-            local temp_available_kb=$(df /tmp | awk 'NR==2 {print $4}')
+            local temp_available_kb
+            temp_available_kb=$(df /tmp | awk 'NR==2 {print $4}')
             local temp_available_mb=$((temp_available_kb / 1024))
-            local staging_size_mb=$(du -sm "$temp_dir" | cut -f1)
+            local staging_size_mb
+            staging_size_mb=$(du -sm "$temp_dir" | cut -f1)
             log_info "/tmp available space: ${temp_available_mb}MB, staging directory size: ${staging_size_mb}MB"
 
             # Check if we have enough space (require 3x staging size for compression)
@@ -1330,7 +1366,8 @@ main() {
            if [ $tar_exit_code -eq 0 ]; then
                 # Verify archive was actually created and has reasonable size
                 if [ -f "$temp_archive" ]; then
-                    local archive_size_mb=$(du -sm "$temp_archive" | cut -f1)
+                    local archive_size_mb
+                    archive_size_mb=$(du -sm "$temp_archive" | cut -f1)
                     log_success "Archive created successfully: $(basename "$temp_archive") (${archive_size_mb}MB)"
 
                     # Test archive integrity before moving
@@ -1345,7 +1382,8 @@ main() {
                     rm -rf "$temp_dir"
                     for nickname in "${!PLEX_FILES[@]}"; do
                         local file="${PLEX_FILES[$nickname]}"
-                        local backup_file="${BACKUP_PATH}/$(basename "$file")"
+                        local backup_file
+                        backup_file="${BACKUP_PATH}/$(basename "$file")"
                         rm -f "$backup_file" "$backup_file.md5"
                     done
                 else
@@ -1374,11 +1412,12 @@ main() {
 
                 # Additional diagnostic information
                 log_error "Staging directory contents:"
-                ls -la "$temp_dir" 2>&1 | while IFS= read -r line; do
+                find "$temp_dir" -ls 2>&1 | while IFS= read -r line; do
                     log_error " $line"
                 done
 
-                local temp_usage=$(df -h /tmp | awk 'NR==2 {print "Used: " $3 "/" $2 " (" $5 ")"}')
+                local temp_usage
+                temp_usage=$(df -h /tmp | awk 'NR==2 {print "Used: " $3 "/" $2 " (" $5 ")"}')
                 log_error "Temp filesystem status: $temp_usage"
 
                 rm -rf "$temp_dir"
@@ -1390,7 +1429,8 @@ main() {
         fi
 
         # Send notification
-        local files_list=$(format_backed_up_files "${backed_up_files[@]}")
+        local files_list
+        files_list=$(format_backed_up_files "${backed_up_files[@]}")
        send_notification "Backup Completed" "Successfully backed up $files_list" "success"
     else
         log_message "No files needed backup"
@@ -1426,7 +1466,8 @@ main() {
         exit 1
     else
         log_success "Enhanced backup completed successfully"
-        local files_list=$(format_backed_up_files "${backed_up_files[@]}")
+        local files_list
+        files_list=$(format_backed_up_files "${backed_up_files[@]}")
         send_notification "Backup Success" "$files_list backed up successfully in ${total_time}s" "success"
     fi
 }
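A note on the one behavioral fix above (the hunk at @@ -554,7 +573,7): with `sudo cmd > file`, the redirection is performed by the calling, unprivileged shell rather than by sudo, so the dump could fail whenever the target was writable only by root, and the `2>/dev/null` hid that failure. Routing the output through `sudo tee` moves the write into a root-owned process. A short sketch of the general pattern (the paths here are hypothetical):

# Fails if /root/out.txt is not writable by the invoking user:
# the `>` redirection runs in the unprivileged shell.
sudo echo "hello" > /root/out.txt

# Works: tee runs under sudo and performs the write itself;
# >/dev/null discards tee's copy of the stream to stdout.
echo "hello" | sudo tee /root/out.txt >/dev/null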