feat: Revamp Plex backup system to streamline archive structure and enhance validation processes

Peter Wood
2025-05-26 07:51:24 -04:00
parent fbd0bf5852
commit 68f7f4ef8e
7 changed files with 390 additions and 162 deletions

View File

@@ -129,10 +129,10 @@ Specialized backup system for Plex Media Server with database-aware features:
./validate-plex-backups.sh
# Test restore without making changes (dry run)
./restore-plex.sh 20250125 --dry-run
./restore-plex.sh plex-backup-20250125_143022.tar.gz --dry-run
# Restore from specific backup
./restore-plex.sh 20250125
# Restore from specific backup archive
./restore-plex.sh plex-backup-20250125_143022.tar.gz
```
## Automation and Scheduling
@@ -183,10 +183,32 @@ The enhanced media backup script includes configurable parameters at the top of
The Plex backup script configuration parameters (a declaration sketch follows the list):
- `MAX_BACKUP_AGE_DAYS=30`: Remove backups older than 30 days
- `MAX_BACKUPS_TO_KEEP=10`: Keep maximum of 10 backup sets
- `BACKUP_ROOT`: Location for backup storage
- `MAX_BACKUPS_TO_KEEP=10`: Keep maximum of 10 backup archives
- `BACKUP_ROOT`: Location for compressed backup archives
- `LOG_ROOT`: Location for backup logs
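As a sketch, these parameters typically sit near the top of `backup-plex.sh`; the `BACKUP_ROOT` path matches the one used throughout this documentation, and `LOG_ROOT` assumes logs live under the backup root as shown in the directory structure below — adjust both for your environment:
```bash
# --- Backup configuration (sketch; adjust paths to your environment) ---
MAX_BACKUP_AGE_DAYS=30                        # prune archives older than this many days
MAX_BACKUPS_TO_KEEP=10                        # retain at most this many archives
BACKUP_ROOT="/mnt/share/media/backups/plex"   # compressed backup archives
LOG_ROOT="${BACKUP_ROOT}/logs"                # backup logs
```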
### Final Backup Directory Structure
The enhanced Plex backup system creates a streamlined archive-only structure:
```bash
/mnt/share/media/backups/plex/
├── plex-backup-20250125_143022.tar.gz # Latest backup
├── plex-backup-20250124_143011.tar.gz # Previous backup
├── plex-backup-20250123_143008.tar.gz # Older backup
└── logs/
├── backup_log_20250125_143022.md
├── plex-backup-performance.json
└── plex-backup.json
```
**Key Benefits:**
- **Direct Archive Storage**: No intermediate directories required
- **Efficient Space Usage**: Only compressed files stored permanently
- **Easy Management**: Timestamp-based naming for clear identification
- **Automatic Cleanup**: Legacy dated directories removed automatically
### Recommended Backup Strategy
Both systems implement a robust backup strategy following industry best practices:

View File

@@ -466,7 +466,7 @@ handle_wal_files() {
"backup")
if [ -f "$wal_file" ]; then
log_info "Found WAL/SHM file: $wal_basename"
local backup_file="${backup_path}/${wal_basename}.$(date '+%Y%m%d_%H%M%S')"
local backup_file="${backup_path}/${wal_basename}"
if sudo cp "$wal_file" "$backup_file"; then
log_success "Backed up WAL/SHM file: $wal_basename"
@@ -799,20 +799,23 @@ cleanup_old_backups() {
log_message "Cleaning up old backups..."
# Remove backups older than MAX_BACKUP_AGE_DAYS
find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -mtime +${MAX_BACKUP_AGE_DAYS} -exec rm -rf {} \; 2>/dev/null || true
find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
# Keep only MAX_BACKUPS_TO_KEEP most recent backups
local backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" | wc -l)
local backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
if [ "$backup_count" -gt "$MAX_BACKUPS_TO_KEEP" ]; then
local excess_count=$((backup_count - MAX_BACKUPS_TO_KEEP))
log_message "Removing $excess_count old backup(s)..."
find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -printf '%T@ %p\n' | \
find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -printf '%T@ %p\n' | \
sort -n | head -n "$excess_count" | cut -d' ' -f2- | \
xargs -r rm -rf
xargs -r rm -f
fi
# Clean up any remaining dated directories from old backup structure
find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -exec rm -rf {} \; 2>/dev/null || true
log_message "Backup cleanup completed"
}
@@ -929,9 +932,9 @@ main() {
local backup_errors=0
local files_backed_up=0
local BACKUP_PATH="${BACKUP_ROOT}/$(date '+%Y%m%d')"
local BACKUP_PATH="${BACKUP_ROOT}"
# Create today's backup directory
# Ensure backup root directory exists
mkdir -p "$BACKUP_PATH"
# Handle WAL files and check database integrity before backup
@@ -989,8 +992,8 @@ main() {
if needs_backup "$file"; then
log_message "Backing up: $(basename "$file")"
# Create backup filename with timestamp
local backup_file="${BACKUP_PATH}/$(basename "$file").$(date '+%Y%m%d_%H%M%S')"
# Create backup filename without timestamp (use original filename)
local backup_file="${BACKUP_PATH}/$(basename "$file")"
# Copy file
if sudo cp "$file" "$backup_file"; then
@@ -1029,32 +1032,136 @@ main() {
# Create archive if files were backed up
if [ "$files_backed_up" -gt 0 ]; then
log_message "Creating compressed archive..."
local temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
local final_archive="${BACKUP_PATH}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
# Create archive in /tmp first to avoid "file changed" issues
if tar --exclude="*.tar.gz" -czf "$temp_archive" -C "$(dirname "$BACKUP_PATH")" "$(basename "$BACKUP_PATH")"; then
# Move the completed archive to the backup directory
if mv "$temp_archive" "$final_archive"; then
log_success "Archive created: $(basename "$final_archive")"
# Remove individual backup files, keep only the archive
find "$BACKUP_PATH" -type f ! -name "*.tar.gz" -delete
# Check backup root directory is writable
if [ ! -w "$BACKUP_ROOT" ]; then
log_error "Backup root directory is not writable: $BACKUP_ROOT"
backup_errors=$((backup_errors + 1))
else
log_error "Failed to move archive to final location"
rm -f "$temp_archive"
local temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
local final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
log_info "Temporary archive: $temp_archive"
log_info "Final archive: $final_archive"
# Create archive in /tmp first, containing only the backed up files
local temp_dir="/tmp/plex-backup-staging-$(date '+%Y%m%d_%H%M%S')"
if ! mkdir -p "$temp_dir"; then
log_error "Failed to create staging directory: $temp_dir"
backup_errors=$((backup_errors + 1))
else
log_info "Created staging directory: $temp_dir"
# Copy backed up files to staging directory
local files_staged=0
for nickname in "${!PLEX_FILES[@]}"; do
local file="${PLEX_FILES[$nickname]}"
local backup_file="${BACKUP_PATH}/$(basename "$file")"
if [ -f "$backup_file" ]; then
if cp "$backup_file" "$temp_dir/"; then
files_staged=$((files_staged + 1))
log_info "Staged for archive: $(basename "$backup_file")"
else
log_warning "Failed to stage file: $(basename "$backup_file")"
fi
else
log_warning "Backup file not found for staging: $(basename "$backup_file")"
fi
done
# Check if any files were staged
if [ "$files_staged" -eq 0 ]; then
log_error "No files were staged for archive creation"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
else
log_info "Staged $files_staged files for archive creation"
# Check disk space in /tmp
local temp_available_kb=$(df /tmp | awk 'NR==2 {print $4}')
local temp_available_mb=$((temp_available_kb / 1024))
local staging_size_mb=$(du -sm "$temp_dir" | cut -f1)
log_info "/tmp available space: ${temp_available_mb}MB, staging directory size: ${staging_size_mb}MB"
# Check if we have enough space (require 3x staging size for compression)
local required_space_mb=$((staging_size_mb * 3))
if [ "$temp_available_mb" -lt "$required_space_mb" ]; then
log_error "Insufficient space in /tmp for archive creation. Required: ${required_space_mb}MB, Available: ${temp_available_mb}MB"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
else
# Create archive with detailed error logging
log_info "Creating archive: $(basename "$temp_archive")"
local tar_output
tar_output=$(tar -czf "$temp_archive" -C "$temp_dir" . 2>&1)
local tar_exit_code=$?
if [ $tar_exit_code -eq 0 ]; then
# Verify archive was actually created and has reasonable size
if [ -f "$temp_archive" ]; then
local archive_size_mb=$(du -sm "$temp_archive" | cut -f1)
log_success "Archive created successfully: $(basename "$temp_archive") (${archive_size_mb}MB)"
# Test archive integrity before moving
if tar -tzf "$temp_archive" >/dev/null 2>&1; then
log_success "Archive integrity verified"
# Move the completed archive to the backup root
if mv "$temp_archive" "$final_archive"; then
log_success "Archive moved to final location: $(basename "$final_archive")"
# Remove individual backup files and staging directory
rm -rf "$temp_dir"
for nickname in "${!PLEX_FILES[@]}"; do
local file="${PLEX_FILES[$nickname]}"
local backup_file="${BACKUP_PATH}/$(basename "$file")"
rm -f "$backup_file" "$backup_file.md5"
done
else
log_error "Failed to move archive to final location: $final_archive"
log_error "Temporary archive remains at: $temp_archive"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
fi
else
log_error "Failed to create archive"
log_error "Archive integrity check failed - archive may be corrupted"
log_error "Archive size: ${archive_size_mb}MB"
rm -f "$temp_archive"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
fi
else
log_error "Archive file was not created despite tar success"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
fi
else
log_error "Failed to create archive (tar exit code: $tar_exit_code)"
if [ -n "$tar_output" ]; then
log_error "Tar command output: $tar_output"
fi
# Additional diagnostic information
log_error "Staging directory contents:"
ls -la "$temp_dir" 2>&1 | while IFS= read -r line; do
log_error " $line"
done
local temp_usage=$(df -h /tmp | awk 'NR==2 {print "Used: " $3 "/" $2 " (" $5 ")"}')
log_error "Temp filesystem status: $temp_usage"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
fi
fi
fi
fi
fi
# Send notification
send_notification "Backup Completed" "Successfully backed up $files_backed_up files" "success"
else
log_message "No files needed backup, removing empty backup directory"
rmdir "$BACKUP_PATH" 2>/dev/null || true
log_message "No files needed backup"
fi
# Cleanup old backups

View File

@@ -302,7 +302,38 @@ The script includes a comprehensive testing framework (`test-plex-backup.sh`):
## Backup Strategy
The enhanced script implements a robust backup strategy:
The enhanced script implements a robust backup strategy with a streamlined tar.gz-only structure:
### Archive-Only Directory Structure
The new backup system eliminates intermediate dated directories and stores only compressed archives:
```bash
/mnt/share/media/backups/plex/
├── plex-backup-20250125_143022.tar.gz # Latest backup
├── plex-backup-20250124_143011.tar.gz # Previous backup
├── plex-backup-20250123_143008.tar.gz # Older backup
└── logs/
├── backup_log_20250125_143022.md
├── plex-backup-performance.json
└── plex-backup.json
```
### Archive Naming Convention
Backup files follow the pattern: `plex-backup-YYYYMMDD_HHMMSS.tar.gz` (a parsing sketch follows the list)
- **YYYYMMDD**: Date of backup (e.g., 20250125)
- **HHMMSS**: Time of backup (e.g., 143022)
- **tar.gz**: Compressed archive format
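As a quick illustration, the timestamp embedded in the name can be generated with `date` and recovered later with a `sed` extraction equivalent to the one used by the monitoring and validation scripts (a sketch, not the production code):
```bash
# Build an archive name from the current time
archive_name="plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"   # e.g. plex-backup-20250125_143022.tar.gz

# Recover the date portion (YYYYMMDD) and render it human-readably
backup_date=$(echo "$archive_name" | sed 's/plex-backup-//; s/_.*$//')
readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y')
echo "$archive_name -> $readable_date"
```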
### Key Improvements
1. **Direct Archive Creation**: No intermediate directories required
2. **Efficient Storage**: Only compressed files stored permanently
3. **Easy Identification**: Timestamp-based naming for sorting
4. **Legacy Cleanup**: Automatic removal of old dated directories
5. **Archive Validation**: Integrity checking of compressed files
### 3-2-1 Backup Rule

View File

@@ -20,43 +20,55 @@ The enhanced script performs the following advanced tasks:
## Enhanced Features
### Performance Tracking
- **JSON Performance Logs**: All operations are timed and logged to `logs/plex-backup-performance.json` (a logging sketch follows this list)
- **Performance Reports**: Automatic generation of average performance metrics
- **Operation Monitoring**: Tracks backup, verification, service management, and overall script execution times
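A rough sketch of how an operation timing could be appended to the performance log; the record fields shown here are illustrative assumptions, not the script's actual schema:
```bash
# Hypothetical timing helper; field names are assumptions for this sketch.
PERF_LOG="${LOG_ROOT}/plex-backup-performance.json"

record_timing() {
    local operation="$1" start_ts="$2" end_ts="$3"
    local duration=$((end_ts - start_ts))
    # Append a one-line JSON object; a real implementation might use jq to maintain a valid JSON array.
    echo "{\"operation\": \"${operation}\", \"duration_seconds\": ${duration}, \"timestamp\": \"$(date -Iseconds)\"}" >> "$PERF_LOG"
}

start=$(date +%s)
# ... run a backup step here ...
record_timing "database_backup" "$start" "$(date +%s)"
```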
### Notification System
The script supports multiple notification channels:
#### Console Notifications
- Color-coded status messages (Success: Green, Error: Red, Warning: Yellow, Info: Blue)
- Timestamped log entries with clear formatting
#### Webhook Notifications
```bash
./backup-plex.sh --webhook=https://your-webhook-url.com/endpoint
```
Sends JSON payloads with backup status, hostname, and timestamps.
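A minimal sketch of the kind of webhook call this implies; the exact field names are assumptions, but the documented payload carries a status, the hostname, and a timestamp:
```bash
# Hypothetical payload shape; field names are assumed for illustration.
payload=$(printf '{"title":"%s","status":"%s","hostname":"%s","timestamp":"%s"}' \
    "Plex Backup Completed" "success" "$(hostname)" "$(date -Iseconds)")

curl -s -X POST \
    -H "Content-Type: application/json" \
    -d "$payload" \
    "https://your-webhook-url.com/endpoint"
```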
#### Email Notifications
```bash
./backup-plex.sh --email=admin@example.com
```
Requires `sendmail` to be configured on the system.
### WAL File Management
The script now properly handles SQLite Write-Ahead Logging files (a condensed sketch follows the list):
- **Automatic Detection**: Identifies and backs up `.db-wal` and `.db-shm` files when present
- **WAL Checkpointing**: Performs `PRAGMA wal_checkpoint(FULL)` before integrity checks
- **Safe Backup**: Ensures WAL files are properly backed up alongside main database files
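A condensed sketch of the checkpoint-then-copy flow; the variable names are assumptions, while the database path matches the one used elsewhere in this documentation:
```bash
PLEX_DB="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db"

# Flush pending WAL frames into the main database file before copying
sudo sqlite3 "$PLEX_DB" "PRAGMA wal_checkpoint(FULL);"

# Back up any remaining -wal / -shm sidecar files alongside the database
for sidecar in "${PLEX_DB}-wal" "${PLEX_DB}-shm"; do
    if [ -f "$sidecar" ]; then
        sudo cp "$sidecar" "${BACKUP_ROOT}/"
    fi
done
```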
### Database Integrity & Repair
Enhanced database management features (an integrity-check sketch follows the list):
- **Pre-backup Integrity Checks**: Verifies database health before backup operations
- **Automated Repair**: Optional automatic repair of corrupted databases using advanced techniques
- **Interactive Repair Mode**: Prompts for repair decisions when issues are detected
- **Post-repair Verification**: Re-checks integrity after repair operations
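A minimal pre-backup integrity check might look like the following (assuming `sqlite3` is installed; the repair and interactive-prompt logic is more involved and not reproduced here):
```bash
# Return 0 if SQLite reports the database as healthy, 1 otherwise.
check_db_integrity() {
    local db="$1"
    local result
    result=$(sudo sqlite3 "$db" "PRAGMA integrity_check;" 2>&1)
    if [ "$result" = "ok" ]; then
        echo "Integrity OK: $(basename "$db")"
        return 0
    fi
    echo "Integrity problems in $(basename "$db"): $result" >&2
    return 1
}

check_db_integrity "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db"
```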
### Parallel Processing
- **Concurrent Verification**: Parallel backup verification for improved performance (pattern sketched after this list)
- **Fallback Safety**: Automatically falls back to sequential processing if parallel mode fails
- **Configurable**: Can be disabled with `--no-parallel` for maximum safety
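The parallel-with-fallback pattern can be sketched with plain background jobs; the helper names below are illustrative, not the script's actual functions:
```bash
# Hypothetical helpers showing parallel verification with a sequential fallback.
verify_archive() {
    tar -tzf "$1" >/dev/null 2>&1
}

verify_backups_parallel() {
    local failed=0
    local pids=()
    for archive in "${BACKUP_ROOT}"/plex-backup-*.tar.gz; do
        verify_archive "$archive" &
        pids+=("$!")
    done
    for pid in "${pids[@]}"; do
        wait "$pid" || failed=$((failed + 1))
    done
    return "$failed"
}

verify_backups_sequential() {
    local failed=0
    for archive in "${BACKUP_ROOT}"/plex-backup-*.tar.gz; do
        verify_archive "$archive" || failed=$((failed + 1))
    done
    return "$failed"
}

# Fall back to sequential verification if the parallel pass reports failures
if ! verify_backups_parallel; then
    verify_backups_sequential
fi
```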
@@ -120,65 +132,62 @@ fi
This block checks if the Plex Media Server service is running. If it is, the script stops the service using a custom script (`plex.sh`).
### 5. Create Backup Directory
### 5. Backup Plex Database Files and Preferences
The enhanced backup system now creates compressed archives directly, eliminating intermediate directories:
```bash
mkdir -p /mnt/share/media/backups/plex/$(date +%Y%m%d)/ || { echo "Failed to create backup directory"; exit 1; }
# Files are copied to temporary staging area for verification
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db" "$BACKUP_PATH/"
log_file_details "com.plexapp.plugins.library.db" "$BACKUP_PATH/"
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db" "$BACKUP_PATH/"
log_file_details "com.plexapp.plugins.library.blobs.db" "$BACKUP_PATH/"
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Preferences.xml" "$BACKUP_PATH/"
log_file_details "Preferences.xml" "$BACKUP_PATH/"
```
This command creates a backup directory with the current date. If the directory creation fails, the script exits with an error message.
These commands copy the Plex database files and preferences directly to the backup root directory. Each file copy operation includes integrity verification and checksum validation.
### 6. Copy Plex Database Files and Preferences
### 6. Create Compressed Archive
```bash
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db" /mnt/share/media/backups/plex/$(date +%Y%m%d)/ || { echo "Failed to copy com.plexapp.plugins.library.db"; exit 1; }
log_file_details "com.plexapp.plugins.library.db" "/mnt/share/media/backups/plex/$(date +%Y%m%d)/"
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db" /mnt/share/media/backups/plex/$(date +%Y%m%d)/ || { echo "Failed to copy com.plexapp.plugins.library.blobs.db"; exit 1; }
log_file_details "com.plexapp.plugins.library.blobs.db" "/mnt/share/media/backups/plex/$(date +%Y%m%d)/"
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Preferences.xml" /mnt/share/media/backups/plex/$(date +%Y%m%d)/ || { echo "Failed to copy Preferences.xml"; exit 1; }
log_file_details "Preferences.xml" "/mnt/share/media/backups/plex/$(date +%Y%m%d)/"
# Create archive directly with timestamp naming convention
final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
tar -czf "$final_archive" -C "$temp_staging_dir" .
```
These commands copy the Plex database files and preferences to the backup directory. Each file copy operation is followed by a call to the `log_file_details` function to log the details of the copied files.
The system now creates compressed archives directly using a timestamp-based naming convention (`plex-backup-YYYYMMDD_HHMMSS.tar.gz`), eliminating the need for intermediate dated directories.
### 7. Compress the Backup Directory
### 7. Archive Validation and Cleanup
```bash
tar -czf /mnt/share/media/backups/plex/$(date +%Y%m%d).tar.gz -C /mnt/share/media/backups/plex/plex $(date +%Y%m%d) || { echo "Failed to compress backup folder"; exit 1; }
```
This command compresses the backup directory into a gzip archive. If the compression fails, the script exits with an error message.
### 8. Delete Original Backup Directory
```bash
if [ $? -eq 0 ]; then
if [ -s /mnt/share/media/backups/plex/$(date +%Y%m%d).tar.gz ]; then
rm -rf /mnt/share/media/backups/plex/$(date +%Y%m%d)/ || { echo "Failed to delete original backup folder"; exit 1; }
# Validate archive integrity
if tar -tzf "$final_archive" >/dev/null 2>&1; then
log_success "Archive created and validated: $(basename "$final_archive")"
# Clean up temporary staging files
rm -rf "$temp_staging_dir"
else
echo "Compressed file is empty, not deleting the backup folder" >> "$LOG_FILE"
fi
else
echo "Compression failed, not deleting the backup folder" >> "$LOG_FILE"
log_error "Archive validation failed"
rm -f "$final_archive"
fi
```
This block checks if the compression was successful. If it was, and the compressed file is not empty, it deletes the original backup directory. If the compression failed or the compressed file is empty, it logs an appropriate message.
The system validates the created archive and removes temporary staging files, ensuring only valid compressed backups are retained in the backup root directory.
### 9. Send Notification
### 8. Send Notification
```bash
curl \
-H tags:popcorn,backup,plex,${HOSTNAME} \
-d "The Plex databases have been saved to the /media/backups/plex folder" \
-d "The Plex databases have been saved to the /media/backups/plex folder as plex-backup-YYYYMMDD_HHMMSS.tar.gz" \
https://notify.peterwood.rocks/lab || { echo "Failed to send notification"; exit 1; }
```
This command sends a notification upon completion of the backup process. If the notification fails, the script exits with an error message.
This command sends a notification upon completion of the backup process, indicating the compressed archive has been created. If the notification fails, the script exits with an error message.
### 10. Restart Plex Media Server Service
### 9. Restart Plex Media Server Service
```bash
if systemctl is-enabled --quiet plexmediaserver.service; then
@@ -188,11 +197,36 @@ fi
This block checks if the Plex Media Server service is enabled. If it is, the script restarts the service using a custom script (`plex.sh`).
### 10. Legacy Cleanup
```bash
# Clean up any remaining dated directories from old backup structure
find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -exec rm -rf {} \; 2>/dev/null || true
```
The enhanced system includes cleanup of legacy dated directories from previous backup structure versions, ensuring a clean tar.gz-only backup directory.
## Important Information
- Ensure that the [`plex.sh`](https://github.com/acedanger/shell/blob/main/plex.sh) script is available and executable. This script is used to stop and start the Plex Media Server service.
- The script uses `systemctl` to manage the Plex Media Server service. Ensure that `systemctl` is available on your system.
- The backup directory and log directory paths are hardcoded. Modify these paths as needed to fit your environment.
- The script logs important actions and errors to a log file with a timestamped filename. Check the log file for details if any issues arise.
- **New Directory Structure**: The enhanced backup system stores only compressed `.tar.gz` files directly in the backup root directory, eliminating intermediate dated directories.
- **Archive Naming**: Backup files follow the naming convention `plex-backup-YYYYMMDD_HHMMSS.tar.gz` for easy identification and sorting.
- **Legacy Compatibility**: The system automatically cleans up old dated directories from previous backup versions during operation.
- The backup directory path is configurable through the `BACKUP_ROOT` variable. Modify this path as needed to fit your environment.
- The script logs important actions and errors to timestamped log files. Check the log files for details if any issues arise.
- **Backup Validation**: All archives undergo integrity checking to ensure backup reliability.
By following this documentation, you should be able to understand and use the `backup-plex.sh` script effectively.
## Final Directory Structure
```
/mnt/share/media/backups/plex/
├── plex-backup-20250125_143022.tar.gz
├── plex-backup-20250124_143011.tar.gz
├── plex-backup-20250123_143008.tar.gz
└── logs/
├── backup_log_20250125_143022.md
└── plex-backup-performance.json
```
By following this documentation, you should be able to understand and use the enhanced `backup-plex.sh` script effectively with its new streamlined tar.gz-only structure.

View File

@@ -132,16 +132,18 @@ check_backup_status() {
# Count total backups
local backup_count=0
if [ -d "$BACKUP_ROOT" ]; then
backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" 2>/dev/null | wc -l)
backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | wc -l)
fi
if [ "$backup_count" -gt 0 ]; then
log_status "OK" "Total backups: $backup_count"
# Find latest backup
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" 2>/dev/null | sort | tail -1)
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
if [ -n "$latest_backup" ]; then
local backup_date=$(basename "$latest_backup")
local backup_filename=$(basename "$latest_backup")
# Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
local backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Invalid date")
local backup_age_days=$(( ($(date +%s) - $(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s 2>/dev/null || echo "0")) / 86400 ))
@@ -157,8 +159,8 @@ check_backup_status() {
local backup_size=$(du -sh "$latest_backup" 2>/dev/null | cut -f1)
log_status "INFO" "Latest backup size: $backup_size"
# Check backup contents
local file_count=$(ls -1 "$latest_backup" 2>/dev/null | wc -l)
# Check backup contents (via tar listing)
local file_count=$(tar -tzf "$latest_backup" 2>/dev/null | wc -l)
log_status "INFO" "Files in latest backup: $file_count"
fi
else
@@ -322,9 +324,11 @@ show_recommendations() {
# Check backup age
if [ -d "$BACKUP_ROOT" ]; then
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" 2>/dev/null | sort | tail -1)
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
if [ -n "$latest_backup" ]; then
local backup_date=$(basename "$latest_backup")
local backup_filename=$(basename "$latest_backup")
# Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
local backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
local backup_age_days=$(( ($(date +%s) - $(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s 2>/dev/null || echo "0")) / 86400 ))
if [ "$backup_age_days" -gt 7 ]; then
recommendations+=("Consider running a manual backup - latest backup is $backup_age_days days old")

View File

@@ -41,34 +41,44 @@ log_warning() {
# List available backups
list_backups() {
log_message "Available backups:"
find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" | sort -r | while read backup_dir; do
local backup_date=$(basename "$backup_dir")
local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y')
local file_count=$(ls -1 "$backup_dir" 2>/dev/null | wc -l)
echo " $backup_date ($readable_date) - $file_count files"
find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort -r | while read backup_file; do
local backup_name=$(basename "$backup_file")
local backup_date=$(echo "$backup_name" | sed 's/plex-backup-\([0-9]\{8\}\)_[0-9]\{6\}\.tar\.gz/\1/')
if [[ "$backup_date" =~ ^[0-9]{8}$ ]]; then
local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Unknown date")
local file_size=$(du -h "$backup_file" 2>/dev/null | cut -f1)
echo " $backup_name ($readable_date) - $file_size"
else
echo " $backup_name - $(du -h "$backup_file" 2>/dev/null | cut -f1)"
fi
done
}
# Validate backup integrity
validate_backup() {
local backup_date="$1"
local backup_dir="$BACKUP_ROOT/$backup_date"
local backup_file="$1"
if [ ! -d "$backup_dir" ]; then
log_error "Backup directory not found: $backup_dir"
if [ ! -f "$backup_file" ]; then
log_error "Backup file not found: $backup_file"
return 1
fi
log_message "Validating backup integrity for $backup_date..."
log_message "Validating backup integrity for $(basename "$backup_file")..."
for file in "${!RESTORE_LOCATIONS[@]}"; do
local backup_file="$backup_dir/$file"
if [ -f "$backup_file" ]; then
# Test archive integrity
if tar -tzf "$backup_file" >/dev/null 2>&1; then
log_success "Archive integrity check passed"
# List contents to verify expected files are present
log_message "Archive contents:"
tar -tzf "$backup_file" | while read file; do
log_success " Found: $file"
else
log_warning "Missing: $file"
fi
done
return 0
else
log_error "Archive integrity check failed"
return 1
fi
}
# Create backup of current Plex data
@@ -97,37 +107,56 @@ backup_current_data() {
# Restore files from backup
restore_files() {
local backup_date="$1"
local backup_file="$1"
local dry_run="$2"
local backup_dir="$BACKUP_ROOT/$backup_date"
if [ "$dry_run" = "true" ]; then
log_message "DRY RUN: Would restore the following files:"
else
log_message "Restoring files from backup $backup_date..."
if [ ! -f "$backup_file" ]; then
log_error "Backup file not found: $backup_file"
return 1
fi
for file in "${!RESTORE_LOCATIONS[@]}"; do
local backup_file="$backup_dir/$file"
local restore_location="${RESTORE_LOCATIONS[$file]}$file"
# Create temporary extraction directory
local temp_dir="/tmp/plex-restore-$(date +%Y%m%d_%H%M%S)"
mkdir -p "$temp_dir"
if [ -f "$backup_file" ]; then
if [ "$dry_run" = "true" ]; then
echo " $backup_file -> $restore_location"
log_message "Extracting backup archive..."
if ! tar -xzf "$backup_file" -C "$temp_dir"; then
log_error "Failed to extract backup archive"
rm -rf "$temp_dir"
return 1
fi
log_message "Restoring files..."
local restore_errors=0
for file in "${!RESTORE_LOCATIONS[@]}"; do
local src_file="$temp_dir/$file"
local dest_path="${RESTORE_LOCATIONS[$file]}"
local dest_file="$dest_path$file"
if [ -f "$src_file" ]; then
if [ "$dry_run" == "true" ]; then
log_message "Would restore: $file to $dest_file"
else
log_message "Restoring: $file"
if sudo cp "$backup_file" "$restore_location"; then
sudo chown plex:plex "$restore_location"
if sudo cp "$src_file" "$dest_file"; then
sudo chown plex:plex "$dest_file"
log_success "Restored: $file"
else
log_error "Failed to restore: $file"
return 1
restore_errors=$((restore_errors + 1))
fi
fi
else
log_warning "Backup file not found: $backup_file"
log_warning "File not found in backup: $file"
restore_errors=$((restore_errors + 1))
fi
done
# Clean up temporary directory
rm -rf "$temp_dir"
return $restore_errors
}
# Manage Plex service
@@ -151,7 +180,7 @@ manage_plex_service() {
# Main function
main() {
local backup_date="$1"
local backup_file="$1"
local dry_run=false
# Check for dry-run flag
@@ -159,30 +188,36 @@ main() {
dry_run=true
fi
# If no backup date provided, list available backups
if [ -z "$backup_date" ] || [ "$backup_date" = "--dry-run" ]; then
# If no backup file provided, list available backups
if [ -z "$backup_file" ] || [ "$backup_file" = "--dry-run" ]; then
list_backups
echo
echo "Usage: $0 <backup_date> [--dry-run]"
echo "Example: $0 20250125"
echo "Usage: $0 <backup_file> [--dry-run]"
echo "Example: $0 plex-backup-20250125_143022.tar.gz"
echo " $0 /mnt/share/media/backups/plex/plex-backup-20250125_143022.tar.gz"
exit 0
fi
# If relative path, prepend BACKUP_ROOT
if [[ "$backup_file" != /* ]]; then
backup_file="$BACKUP_ROOT/$backup_file"
fi
# Validate backup exists and is complete
if ! validate_backup "$backup_date"; then
if ! validate_backup "$backup_file"; then
log_error "Backup validation failed"
exit 1
fi
if [ "$dry_run" = "true" ]; then
restore_files "$backup_date" true
restore_files "$backup_file" true
log_message "Dry run completed. No changes were made."
exit 0
fi
# Confirm restoration
echo
log_warning "This will restore Plex data from backup $backup_date"
log_warning "This will restore Plex data from backup $(basename "$backup_file")"
log_warning "Current Plex data will be backed up before restoration"
read -p "Continue? (y/N): " -n 1 -r
echo
@@ -204,7 +239,7 @@ main() {
fi
# Restore files
if restore_files "$backup_date" false; then
if restore_files "$backup_file" false; then
log_success "Restoration completed successfully"
log_message "Current data backup saved at: $current_backup"
else

View File

@@ -54,11 +54,11 @@ validate_backup_structure() {
return 1
fi
local backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" | wc -l)
log_info "Found $backup_count backup directories"
local backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
log_info "Found $backup_count backup files"
if [ "$backup_count" -eq 0 ]; then
log_warning "No backup directories found"
log_warning "No backup files found"
return 1
fi
@@ -67,32 +67,31 @@ validate_backup_structure() {
# Validate individual backup
validate_backup() {
local backup_dir="$1"
local backup_date=$(basename "$backup_dir")
local backup_file="$1"
local backup_name=$(basename "$backup_file")
local errors=0
log_info "Validating backup: $backup_date"
log_info "Validating backup: $backup_name"
# Check if directory exists and is readable
if [ ! -d "$backup_dir" ] || [ ! -r "$backup_dir" ]; then
log_error "Backup directory not accessible: $backup_dir"
# Check if file exists and is readable
if [ ! -f "$backup_file" ] || [ ! -r "$backup_file" ]; then
log_error "Backup file not accessible: $backup_file"
return 1
fi
# Check for expected files
for file in "${EXPECTED_FILES[@]}"; do
local file_path="$backup_dir/$file"
if [ -f "$file_path" ]; then
# Check file size
local size=$(stat -c%s "$file_path")
if [ "$size" -gt 0 ]; then
local human_size=$(du -h "$file_path" | cut -f1)
log_success " $file ($human_size)"
else
log_error " $file is empty"
# Test archive integrity
if ! tar -tzf "$backup_file" >/dev/null 2>&1; then
log_error "Archive integrity check failed: $backup_name"
errors=$((errors + 1))
fi
else
log_success "Archive integrity check passed: $backup_name"
# Check for expected files in archive
local archive_contents=$(tar -tzf "$backup_file" 2>/dev/null)
for file in "${EXPECTED_FILES[@]}"; do
if echo "$archive_contents" | grep -q "^$file$"; then
log_success " Found: $file"
else
log_error " Missing file: $file"
errors=$((errors + 1))
@@ -100,15 +99,9 @@ validate_backup() {
done
# Check for unexpected files
local file_count=$(ls -1 "$backup_dir" | wc -l)
local expected_count=${#EXPECTED_FILES[@]}
if [ "$file_count" -ne "$expected_count" ]; then
log_warning " Expected $expected_count files, found $file_count"
ls -la "$backup_dir" | grep -v "^total" | grep -v "^d" | while read line; do
local filename=$(echo "$line" | awk '{print $9}')
if [[ ! " ${EXPECTED_FILES[@]} " =~ " ${filename} " ]]; then
log_warning " Unexpected file: $filename"
echo "$archive_contents" | while IFS= read -r line; do
if [[ ! " ${EXPECTED_FILES[@]} " =~ " ${line} " ]]; then
log_warning " Unexpected file: $line"
fi
done
fi
@@ -120,14 +113,16 @@ validate_backup() {
check_backup_freshness() {
log_info "Checking backup freshness..."
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" | sort | tail -1)
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort | tail -1)
if [ -z "$latest_backup" ]; then
log_error "No backups found"
return 1
fi
local backup_date=$(basename "$latest_backup")
local backup_filename=$(basename "$latest_backup")
# Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
local backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
local backup_timestamp=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s)
local current_timestamp=$(date +%s)
local age_days=$(( (current_timestamp - backup_timestamp) / 86400 ))
@@ -205,9 +200,9 @@ generate_report() {
echo "==================================" >> "$REPORT_FILE"
# Validate each backup
find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" | sort | while read backup_dir; do
find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort | while read backup_file; do
total_backups=$((total_backups + 1))
validate_backup "$backup_dir"
validate_backup "$backup_file"
local backup_errors=$?
if [ "$backup_errors" -eq 0 ]; then
@@ -239,13 +234,13 @@ fix_issues() {
log_success "JSON log file created/fixed"
fi
# Remove empty backup directories
find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" -empty -delete 2>/dev/null || true
# Clean up any remaining dated directories from old backup structure
find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" -exec rm -rf {} \; 2>/dev/null || true
# Fix permissions if needed
if [ -d "$BACKUP_ROOT" ]; then
chmod 755 "$BACKUP_ROOT"
find "$BACKUP_ROOT" -type f -exec chmod 644 {} \; 2>/dev/null || true
find "$BACKUP_ROOT" -type f -name "plex-backup-*.tar.gz" -exec chmod 644 {} \; 2>/dev/null || true
log_success "Fixed backup permissions"
fi
}