feat: Revamp Plex backup system to streamline archive structure and enhance validation processes

Peter Wood
2025-05-26 07:51:24 -04:00
parent fbd0bf5852
commit 68f7f4ef8e
7 changed files with 390 additions and 162 deletions

@@ -129,10 +129,10 @@ Specialized backup system for Plex Media Server with database-aware features:
./validate-plex-backups.sh
# Test restore without making changes (dry run)
./restore-plex.sh 20250125 --dry-run ./restore-plex.sh plex-backup-20250125_143022.tar.gz --dry-run
# Restore from specific backup # Restore from specific backup archive
./restore-plex.sh 20250125 ./restore-plex.sh plex-backup-20250125_143022.tar.gz
```
## Automation and Scheduling
@@ -183,10 +183,32 @@ The enhanced media backup script includes configurable parameters at the top of
The Plex backup script configuration parameters:
- `MAX_BACKUP_AGE_DAYS=30`: Remove backups older than 30 days
- `MAX_BACKUPS_TO_KEEP=10`: Keep maximum of 10 backup sets - `MAX_BACKUPS_TO_KEEP=10`: Keep maximum of 10 backup archives
- `BACKUP_ROOT`: Location for backup storage - `BACKUP_ROOT`: Location for compressed backup archives
- `LOG_ROOT`: Location for backup logs
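For illustration, a minimal sketch of how these parameters might be declared at the top of the script (the paths and defaults shown are taken from the examples in these docs, not the script's verbatim source):
```bash
# Illustrative configuration block -- adjust values for your environment
MAX_BACKUP_AGE_DAYS=30                        # prune archives older than this many days
MAX_BACKUPS_TO_KEEP=10                        # retain at most this many archives
BACKUP_ROOT="/mnt/share/media/backups/plex"   # destination for compressed .tar.gz archives
LOG_ROOT="${BACKUP_ROOT}/logs"                # destination for backup logs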
### Final Backup Directory Structure
The enhanced Plex backup system creates a streamlined archive-only structure:
```bash
/mnt/share/media/backups/plex/
├── plex-backup-20250125_143022.tar.gz # Latest backup
├── plex-backup-20250124_143011.tar.gz # Previous backup
├── plex-backup-20250123_143008.tar.gz # Older backup
└── logs/
├── backup_log_20250125_143022.md
├── plex-backup-performance.json
└── plex-backup.json
```
**Key Benefits:**
- **Direct Archive Storage**: No intermediate directories required
- **Efficient Space Usage**: Only compressed files stored permanently
- **Easy Management**: Timestamp-based naming for clear identification
- **Automatic Cleanup**: Legacy dated directories removed automatically
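The automatic cleanup mentioned above comes down to a single `find` over the backup root, as the updated script does later in this commit:
```bash
# Clean up any remaining dated directories from the old backup structure
find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -exec rm -rf {} \; 2>/dev/null || true
```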
### Recommended Backup Strategy
Both systems implement a robust backup strategy following industry best practices:

@@ -466,7 +466,7 @@ handle_wal_files() {
"backup") "backup")
if [ -f "$wal_file" ]; then if [ -f "$wal_file" ]; then
log_info "Found WAL/SHM file: $wal_basename" log_info "Found WAL/SHM file: $wal_basename"
local backup_file="${backup_path}/${wal_basename}.$(date '+%Y%m%d_%H%M%S')" local backup_file="${backup_path}/${wal_basename}"
if sudo cp "$wal_file" "$backup_file"; then if sudo cp "$wal_file" "$backup_file"; then
log_success "Backed up WAL/SHM file: $wal_basename" log_success "Backed up WAL/SHM file: $wal_basename"
@@ -799,20 +799,23 @@ cleanup_old_backups() {
log_message "Cleaning up old backups..." log_message "Cleaning up old backups..."
# Remove backups older than MAX_BACKUP_AGE_DAYS # Remove backups older than MAX_BACKUP_AGE_DAYS
find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -mtime +${MAX_BACKUP_AGE_DAYS} -exec rm -rf {} \; 2>/dev/null || true find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
# Keep only MAX_BACKUPS_TO_KEEP most recent backups
local backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" | wc -l) local backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
if [ "$backup_count" -gt "$MAX_BACKUPS_TO_KEEP" ]; then if [ "$backup_count" -gt "$MAX_BACKUPS_TO_KEEP" ]; then
local excess_count=$((backup_count - MAX_BACKUPS_TO_KEEP)) local excess_count=$((backup_count - MAX_BACKUPS_TO_KEEP))
log_message "Removing $excess_count old backup(s)..." log_message "Removing $excess_count old backup(s)..."
find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -printf '%T@ %p\n' | \ find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -printf '%T@ %p\n' | \
sort -n | head -n "$excess_count" | cut -d' ' -f2- | \
xargs -r rm -rf xargs -r rm -f
fi fi
# Clean up any remaining dated directories from old backup structure
find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -exec rm -rf {} \; 2>/dev/null || true
log_message "Backup cleanup completed" log_message "Backup cleanup completed"
} }
@@ -929,9 +932,9 @@ main() {
local backup_errors=0
local files_backed_up=0
local BACKUP_PATH="${BACKUP_ROOT}/$(date '+%Y%m%d')" local BACKUP_PATH="${BACKUP_ROOT}"
# Create today's backup directory # Ensure backup root directory exists
mkdir -p "$BACKUP_PATH" mkdir -p "$BACKUP_PATH"
# Handle WAL files and check database integrity before backup # Handle WAL files and check database integrity before backup
@@ -989,8 +992,8 @@ main() {
if needs_backup "$file" ]; then if needs_backup "$file" ]; then
log_message "Backing up: $(basename "$file")" log_message "Backing up: $(basename "$file")"
# Create backup filename with timestamp # Create backup filename without timestamp (use original filename)
local backup_file="${BACKUP_PATH}/$(basename "$file").$(date '+%Y%m%d_%H%M%S')" local backup_file="${BACKUP_PATH}/$(basename "$file")"
# Copy file
if sudo cp "$file" "$backup_file"; then
@@ -1029,32 +1032,136 @@ main() {
# Create archive if files were backed up
if [ "$files_backed_up" -gt 0 ]; then
log_message "Creating compressed archive..."
local temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
local final_archive="${BACKUP_PATH}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
# Create archive in /tmp first to avoid "file changed" issues # Check backup root directory is writable
if tar --exclude="*.tar.gz" -czf "$temp_archive" -C "$(dirname "$BACKUP_PATH")" "$(basename "$BACKUP_PATH")"; then if [ ! -w "$BACKUP_ROOT" ]; then
# Move the completed archive to the backup directory log_error "Backup root directory is not writable: $BACKUP_ROOT"
if mv "$temp_archive" "$final_archive"; then backup_errors=$((backup_errors + 1))
log_success "Archive created: $(basename "$final_archive")"
# Remove individual backup files, keep only the archive
find "$BACKUP_PATH" -type f ! -name "*.tar.gz" -delete
else else
log_error "Failed to move archive to final location" local temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
rm -f "$temp_archive" local final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
log_info "Temporary archive: $temp_archive"
log_info "Final archive: $final_archive"
# Create archive in /tmp first, containing only the backed up files
local temp_dir="/tmp/plex-backup-staging-$(date '+%Y%m%d_%H%M%S')"
if ! mkdir -p "$temp_dir"; then
log_error "Failed to create staging directory: $temp_dir"
backup_errors=$((backup_errors + 1))
else
log_info "Created staging directory: $temp_dir"
# Copy backed up files to staging directory
local files_staged=0
for nickname in "${!PLEX_FILES[@]}"; do
local file="${PLEX_FILES[$nickname]}"
local backup_file="${BACKUP_PATH}/$(basename "$file")"
if [ -f "$backup_file" ]; then
if cp "$backup_file" "$temp_dir/"; then
files_staged=$((files_staged + 1))
log_info "Staged for archive: $(basename "$backup_file")"
else
log_warning "Failed to stage file: $(basename "$backup_file")"
fi
else
log_warning "Backup file not found for staging: $(basename "$backup_file")"
fi
done
# Check if any files were staged
if [ "$files_staged" -eq 0 ]; then
log_error "No files were staged for archive creation"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
else
log_info "Staged $files_staged files for archive creation"
# Check disk space in /tmp
local temp_available_kb=$(df /tmp | awk 'NR==2 {print $4}')
local temp_available_mb=$((temp_available_kb / 1024))
local staging_size_mb=$(du -sm "$temp_dir" | cut -f1)
log_info "/tmp available space: ${temp_available_mb}MB, staging directory size: ${staging_size_mb}MB"
# Check if we have enough space (require 3x staging size for compression)
local required_space_mb=$((staging_size_mb * 3))
if [ "$temp_available_mb" -lt "$required_space_mb" ]; then
log_error "Insufficient space in /tmp for archive creation. Required: ${required_space_mb}MB, Available: ${temp_available_mb}MB"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
else
# Create archive with detailed error logging
log_info "Creating archive: $(basename "$temp_archive")"
local tar_output
tar_output=$(tar -czf "$temp_archive" -C "$temp_dir" . 2>&1)
local tar_exit_code=$?
if [ $tar_exit_code -eq 0 ]; then
# Verify archive was actually created and has reasonable size
if [ -f "$temp_archive" ]; then
local archive_size_mb=$(du -sm "$temp_archive" | cut -f1)
log_success "Archive created successfully: $(basename "$temp_archive") (${archive_size_mb}MB)"
# Test archive integrity before moving
if tar -tzf "$temp_archive" >/dev/null 2>&1; then
log_success "Archive integrity verified"
# Move the completed archive to the backup root
if mv "$temp_archive" "$final_archive"; then
log_success "Archive moved to final location: $(basename "$final_archive")"
# Remove individual backup files and staging directory
rm -rf "$temp_dir"
for nickname in "${!PLEX_FILES[@]}"; do
local file="${PLEX_FILES[$nickname]}"
local backup_file="${BACKUP_PATH}/$(basename "$file")"
rm -f "$backup_file" "$backup_file.md5"
done
else
log_error "Failed to move archive to final location: $final_archive"
log_error "Temporary archive remains at: $temp_archive"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
fi
else
log_error "Failed to create archive" log_error "Archive integrity check failed - archive may be corrupted"
log_error "Archive size: ${archive_size_mb}MB"
rm -f "$temp_archive"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
fi
else
log_error "Archive file was not created despite tar success"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
fi
else
log_error "Failed to create archive (tar exit code: $tar_exit_code)"
if [ -n "$tar_output" ]; then
log_error "Tar command output: $tar_output"
fi
# Additional diagnostic information
log_error "Staging directory contents:"
ls -la "$temp_dir" 2>&1 | while IFS= read -r line; do
log_error " $line"
done
local temp_usage=$(df -h /tmp | awk 'NR==2 {print "Used: " $3 "/" $2 " (" $5 ")"}')
log_error "Temp filesystem status: $temp_usage"
rm -rf "$temp_dir"
backup_errors=$((backup_errors + 1))
fi
fi
fi
fi
fi
# Send notification
send_notification "Backup Completed" "Successfully backed up $files_backed_up files" "success"
else
log_message "No files needed backup, removing empty backup directory" log_message "No files needed backup"
rmdir "$BACKUP_PATH" 2>/dev/null || true
fi fi
# Cleanup old backups # Cleanup old backups

@@ -302,7 +302,38 @@ The script includes a comprehensive testing framework (`test-plex-backup.sh`):
## Backup Strategy
The enhanced script implements a robust backup strategy: The enhanced script implements a robust backup strategy with a streamlined tar.gz-only structure:
### Archive-Only Directory Structure
The new backup system eliminates intermediate dated directories and stores only compressed archives:
```bash
/mnt/share/media/backups/plex/
├── plex-backup-20250125_143022.tar.gz # Latest backup
├── plex-backup-20250124_143011.tar.gz # Previous backup
├── plex-backup-20250123_143008.tar.gz # Older backup
└── logs/
├── backup_log_20250125_143022.md
├── plex-backup-performance.json
└── plex-backup.json
```
### Archive Naming Convention
Backup files follow the pattern: `plex-backup-YYYYMMDD_HHMMSS.tar.gz`
- **YYYYMMDD**: Date of backup (e.g., 20250125)
- **HHMMSS**: Time of backup (e.g., 143022)
- **tar.gz**: Compressed archive format
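As a quick sketch of working with this convention (the `date` and `sed` calls mirror what the updated scripts in this commit use):
```bash
# Generate an archive name following plex-backup-YYYYMMDD_HHMMSS.tar.gz
archive_name="plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"

# Recover the YYYYMMDD portion from an existing archive name
backup_date=$(echo "plex-backup-20250125_143022.tar.gz" | sed 's/plex-backup-//' | sed 's/_.*$//')
echo "Archive: $archive_name, extracted date: $backup_date"   # extracted date: 20250125
```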
### Key Improvements
1. **Direct Archive Creation**: No intermediate directories required
2. **Efficient Storage**: Only compressed files stored permanently
3. **Easy Identification**: Timestamp-based naming for sorting
4. **Legacy Cleanup**: Automatic removal of old dated directories
5. **Archive Validation**: Integrity checking of compressed files
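A minimal validation sketch along these lines (the file names assume the three files this commit backs up; the real `validate-plex-backups.sh` does considerably more):
```bash
# Verify one archive is readable and contains the expected files
archive="/mnt/share/media/backups/plex/plex-backup-20250125_143022.tar.gz"
expected=("com.plexapp.plugins.library.db" "com.plexapp.plugins.library.blobs.db" "Preferences.xml")

if tar -tzf "$archive" >/dev/null 2>&1; then
    contents=$(tar -tzf "$archive")
    for f in "${expected[@]}"; do
        echo "$contents" | grep -q "$f" && echo "  found:   $f" || echo "  missing: $f"
    done
else
    echo "Archive failed integrity check: $archive"
fi
```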
### 3-2-1 Backup Rule

@@ -20,43 +20,55 @@ The enhanced script performs the following advanced tasks:
## Enhanced Features
### Performance Tracking
- **JSON Performance Logs**: All operations are timed and logged to `logs/plex-backup-performance.json`
- **Performance Reports**: Automatic generation of average performance metrics
- **Operation Monitoring**: Tracks backup, verification, service management, and overall script execution times
### Notification System
The script supports multiple notification channels:
#### Console Notifications
- Color-coded status messages (Success: Green, Error: Red, Warning: Yellow, Info: Blue)
- Timestamped log entries with clear formatting
#### Webhook Notifications
```bash
./backup-plex.sh --webhook=https://your-webhook-url.com/endpoint
```
Sends JSON payloads with backup status, hostname, and timestamps.
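The payload shape is roughly as follows (the field names here are an assumption for illustration; check the script for the exact keys):
```bash
# Hypothetical webhook payload sent on completion
WEBHOOK_URL="https://your-webhook-url.com/endpoint"
payload=$(cat <<EOF
{
  "event": "plex-backup",
  "status": "success",
  "hostname": "$(hostname)",
  "timestamp": "$(date -Iseconds)"
}
EOF
)
curl -s -X POST -H "Content-Type: application/json" -d "$payload" "$WEBHOOK_URL"
```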
#### Email Notifications
```bash
./backup-plex.sh --email=admin@example.com
```
Requires `sendmail` to be configured on the system.
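With `sendmail` configured, a notification can be as simple as piping a message to it (the subject and body below are placeholders):
```bash
# Minimal sendmail-based notification sketch
{
  echo "Subject: Plex backup completed on $(hostname)"
  echo
  echo "Backup finished at $(date). See the logs directory for details."
} | sendmail admin@example.com
```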
### WAL File Management
The script now properly handles SQLite Write-Ahead Logging files:
- **Automatic Detection**: Identifies and backs up `.db-wal` and `.db-shm` files when present
- **WAL Checkpointing**: Performs `PRAGMA wal_checkpoint(FULL)` before integrity checks
- **Safe Backup**: Ensures WAL files are properly backed up alongside main database files
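A checkpoint of this kind can be issued with a one-line `sqlite3` call (sketch only; the database path shown is the standard Plex location used elsewhere in these docs):
```bash
PLEX_DB="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db"

# Flush pending WAL pages into the main database file before copying it
sudo sqlite3 "$PLEX_DB" "PRAGMA wal_checkpoint(FULL);"

# Any sidecar files still present get backed up alongside the database
ls -l "${PLEX_DB}-wal" "${PLEX_DB}-shm" 2>/dev/null || true
```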
### Database Integrity & Repair
Enhanced database management features:
- **Pre-backup Integrity Checks**: Verifies database health before backup operations
- **Automated Repair**: Optional automatic repair of corrupted databases using advanced techniques
- **Interactive Repair Mode**: Prompts for repair decisions when issues are detected
- **Post-repair Verification**: Re-checks integrity after repair operations
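A pre-backup health check of this sort can be approximated with SQLite's built-in integrity pragma (a sketch only; the script's repair logic goes well beyond this):
```bash
PLEX_DB="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db"

# "ok" is the only healthy result; anything else lists the problems found
result=$(sudo sqlite3 "$PLEX_DB" "PRAGMA integrity_check;")
if [ "$result" = "ok" ]; then
    echo "Database integrity check passed"
else
    echo "Database integrity check FAILED:"
    echo "$result"
fi
```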
### Parallel Processing
- **Concurrent Verification**: Parallel backup verification for improved performance
- **Fallback Safety**: Automatically falls back to sequential processing if parallel mode fails
- **Configurable**: Can be disabled with `--no-parallel` for maximum safety
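A rough sketch of the idea, verifying archives concurrently with a sequential fallback (illustrative only, not the script's actual implementation):
```bash
PARALLEL=true   # set to false to mimic --no-parallel behaviour

verify_archive() {
    if tar -tzf "$1" >/dev/null 2>&1; then
        echo "verified: $(basename "$1")"
    else
        echo "FAILED:   $(basename "$1")"
    fi
}

for archive in /mnt/share/media/backups/plex/plex-backup-*.tar.gz; do
    [ -e "$archive" ] || continue
    if [ "$PARALLEL" = "true" ]; then
        verify_archive "$archive" &     # one background job per archive
    else
        verify_archive "$archive"       # sequential fallback
    fi
done
wait    # block until all background verifications finish
```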
@@ -120,65 +132,62 @@ fi
This block checks if the Plex Media Server service is running. If it is, the script stops the service using a custom script (`plex.sh`).
### 5. Create Backup Directory ### 5. Backup Plex Database Files and Preferences
The enhanced backup system now creates compressed archives directly, eliminating intermediate directories:
```bash ```bash
mkdir -p /mnt/share/media/backups/plex/$(date +%Y%m%d)/ || { echo "Failed to create backup directory"; exit 1; } # Files are copied to temporary staging area for verification
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db" "$BACKUP_PATH/"
log_file_details "com.plexapp.plugins.library.db" "$BACKUP_PATH/"
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db" "$BACKUP_PATH/"
log_file_details "com.plexapp.plugins.library.blobs.db" "$BACKUP_PATH/"
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Preferences.xml" "$BACKUP_PATH/"
log_file_details "Preferences.xml" "$BACKUP_PATH/"
``` ```
This command creates a backup directory with the current date. If the directory creation fails, the script exits with an error message. These commands copy the Plex database files and preferences directly to the backup root directory. Each file copy operation includes integrity verification and checksum validation.
### 6. Copy Plex Database Files and Preferences ### 6. Create Compressed Archive
```bash ```bash
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db" /mnt/share/media/backups/plex/$(date +%Y%m%d)/ || { echo "Failed to copy com.plexapp.plugins.library.db"; exit 1; } # Create archive directly with timestamp naming convention
log_file_details "com.plexapp.plugins.library.db" "/mnt/share/media/backups/plex/$(date +%Y%m%d)/" final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
tar -czf "$final_archive" -C "$temp_staging_dir" .
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.blobs.db" /mnt/share/media/backups/plex/$(date +%Y%m%d)/ || { echo "Failed to copy com.plexapp.plugins.library.blobs.db"; exit 1; }
log_file_details "com.plexapp.plugins.library.blobs.db" "/mnt/share/media/backups/plex/$(date +%Y%m%d)/"
cp "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Preferences.xml" /mnt/share/media/backups/plex/$(date +%Y%m%d)/ || { echo "Failed to copy Preferences.xml"; exit 1; }
log_file_details "Preferences.xml" "/mnt/share/media/backups/plex/$(date +%Y%m%d)/"
``` ```
These commands copy the Plex database files and preferences to the backup directory. Each file copy operation is followed by a call to the `log_file_details` function to log the details of the copied files. The system now creates compressed archives directly using a timestamp-based naming convention (`plex-backup-YYYYMMDD_HHMMSS.tar.gz`), eliminating the need for intermediate dated directories.
### 7. Compress the Backup Directory ### 7. Archive Validation and Cleanup
```bash ```bash
tar -czf /mnt/share/media/backups/plex/$(date +%Y%m%d).tar.gz -C /mnt/share/media/backups/plex/plex $(date +%Y%m%d) || { echo "Failed to compress backup folder"; exit 1; } # Validate archive integrity
``` if tar -tzf "$final_archive" >/dev/null 2>&1; then
log_success "Archive created and validated: $(basename "$final_archive")"
This command compresses the backup directory into a gzip archive. If the compression fails, the script exits with an error message. # Clean up temporary staging files
rm -rf "$temp_staging_dir"
### 8. Delete Original Backup Directory
```bash
if [ $? -eq 0 ]; then
if [ -s /mnt/share/media/backups/plex/$(date +%Y%m%d).tar.gz ]; then
rm -rf /mnt/share/media/backups/plex/$(date +%Y%m%d)/ || { echo "Failed to delete original backup folder"; exit 1; }
else else
echo "Compressed file is empty, not deleting the backup folder" >> "$LOG_FILE" log_error "Archive validation failed"
fi rm -f "$final_archive"
else
echo "Compression failed, not deleting the backup folder" >> "$LOG_FILE"
fi fi
``` ```
This block checks if the compression was successful. If it was, and the compressed file is not empty, it deletes the original backup directory. If the compression failed or the compressed file is empty, it logs an appropriate message. The system validates the created archive and removes temporary staging files, ensuring only valid compressed backups are retained in the backup root directory.
### 9. Send Notification ### 8. Send Notification
```bash
curl \
-H tags:popcorn,backup,plex,${HOSTNAME} \
-d "The Plex databases have been saved to the /media/backups/plex folder" \ -d "The Plex databases have been saved to the /media/backups/plex folder as plex-backup-YYYYMMDD_HHMMSS.tar.gz" \
https://notify.peterwood.rocks/lab || { echo "Failed to send notification"; exit 1; }
```
This command sends a notification upon completion of the backup process. If the notification fails, the script exits with an error message. This command sends a notification upon completion of the backup process, indicating the compressed archive has been created. If the notification fails, the script exits with an error message.
### 10. Restart Plex Media Server Service ### 9. Restart Plex Media Server Service
```bash
if systemctl is-enabled --quiet plexmediaserver.service; then
@@ -188,11 +197,36 @@ fi
This block checks if the Plex Media Server service is enabled. If it is, the script restarts the service using a custom script (`plex.sh`).
### 10. Legacy Cleanup
```bash
# Clean up any remaining dated directories from old backup structure
find "${BACKUP_ROOT}" -maxdepth 1 -type d -name "????????" -exec rm -rf {} \; 2>/dev/null || true
```
The enhanced system includes cleanup of legacy dated directories from previous backup structure versions, ensuring a clean tar.gz-only backup directory.
## Important Information
- Ensure that the [`plex.sh`](https://github.com/acedanger/shell/blob/main/plex.sh) script is available and executable. This script is used to stop and start the Plex Media Server service.
- The script uses `systemctl` to manage the Plex Media Server service. Ensure that `systemctl` is available on your system.
- The backup directory and log directory paths are hardcoded. Modify these paths as needed to fit your environment. - **New Directory Structure**: The enhanced backup system stores only compressed `.tar.gz` files directly in the backup root directory, eliminating intermediate dated directories.
- The script logs important actions and errors to a log file with a timestamped filename. Check the log file for details if any issues arise. - **Archive Naming**: Backup files follow the naming convention `plex-backup-YYYYMMDD_HHMMSS.tar.gz` for easy identification and sorting.
- **Legacy Compatibility**: The system automatically cleans up old dated directories from previous backup versions during operation.
- The backup directory path is configurable through the `BACKUP_ROOT` variable. Modify this path as needed to fit your environment.
- The script logs important actions and errors to timestamped log files. Check the log files for details if any issues arise.
- **Backup Validation**: All archives undergo integrity checking to ensure backup reliability.
By following this documentation, you should be able to understand and use the `backup-plex.sh` script effectively. ## Final Directory Structure
```
/mnt/share/media/backups/plex/
├── plex-backup-20250125_143022.tar.gz
├── plex-backup-20250124_143011.tar.gz
├── plex-backup-20250123_143008.tar.gz
└── logs/
├── backup_log_20250125_143022.md
└── plex-backup-performance.json
```
By following this documentation, you should be able to understand and use the enhanced `backup-plex.sh` script effectively with its new streamlined tar.gz-only structure.

@@ -132,16 +132,18 @@ check_backup_status() {
# Count total backups
local backup_count=0
if [ -d "$BACKUP_ROOT" ]; then
backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" 2>/dev/null | wc -l) backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | wc -l)
fi
if [ "$backup_count" -gt 0 ]; then
log_status "OK" "Total backups: $backup_count"
# Find latest backup
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" 2>/dev/null | sort | tail -1) local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
if [ -n "$latest_backup" ]; then if [ -n "$latest_backup" ]; then
local backup_date=$(basename "$latest_backup") local backup_filename=$(basename "$latest_backup")
# Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
local backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Invalid date")
local backup_age_days=$(( ($(date +%s) - $(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s 2>/dev/null || echo "0")) / 86400 ))
@@ -157,8 +159,8 @@ check_backup_status() {
local backup_size=$(du -sh "$latest_backup" 2>/dev/null | cut -f1)
log_status "INFO" "Latest backup size: $backup_size"
# Check backup contents # Check backup contents (via tar listing)
local file_count=$(ls -1 "$latest_backup" 2>/dev/null | wc -l) local file_count=$(tar -tzf "$latest_backup" 2>/dev/null | wc -l)
log_status "INFO" "Files in latest backup: $file_count" log_status "INFO" "Files in latest backup: $file_count"
fi fi
else else
@@ -322,9 +324,11 @@ show_recommendations() {
# Check backup age
if [ -d "$BACKUP_ROOT" ]; then
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" 2>/dev/null | sort | tail -1) local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
if [ -n "$latest_backup" ]; then if [ -n "$latest_backup" ]; then
local backup_date=$(basename "$latest_backup") local backup_filename=$(basename "$latest_backup")
# Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
local backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
local backup_age_days=$(( ($(date +%s) - $(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s 2>/dev/null || echo "0")) / 86400 ))
if [ "$backup_age_days" -gt 7 ]; then
recommendations+=("Consider running a manual backup - latest backup is $backup_age_days days old")

@@ -41,34 +41,44 @@ log_warning() {
# List available backups
list_backups() {
log_message "Available backups:"
find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" | sort -r | while read backup_dir; do find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort -r | while read backup_file; do
local backup_date=$(basename "$backup_dir") local backup_name=$(basename "$backup_file")
local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y') local backup_date=$(echo "$backup_name" | sed 's/plex-backup-\([0-9]\{8\}\)_[0-9]\{6\}\.tar\.gz/\1/')
local file_count=$(ls -1 "$backup_dir" 2>/dev/null | wc -l) if [[ "$backup_date" =~ ^[0-9]{8}$ ]]; then
echo " $backup_date ($readable_date) - $file_count files" local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Unknown date")
local file_size=$(du -h "$backup_file" 2>/dev/null | cut -f1)
echo " $backup_name ($readable_date) - $file_size"
else
echo " $backup_name - $(du -h "$backup_file" 2>/dev/null | cut -f1)"
fi
done
}
# Validate backup integrity
validate_backup() {
local backup_date="$1" local backup_file="$1"
local backup_dir="$BACKUP_ROOT/$backup_date"
if [ ! -d "$backup_dir" ]; then if [ ! -f "$backup_file" ]; then
log_error "Backup directory not found: $backup_dir" log_error "Backup file not found: $backup_file"
return 1 return 1
fi fi
log_message "Validating backup integrity for $backup_date..." log_message "Validating backup integrity for $(basename "$backup_file")..."
for file in "${!RESTORE_LOCATIONS[@]}"; do # Test archive integrity
local backup_file="$backup_dir/$file" if tar -tzf "$backup_file" >/dev/null 2>&1; then
if [ -f "$backup_file" ]; then log_success "Archive integrity check passed"
# List contents to verify expected files are present
log_message "Archive contents:"
tar -tzf "$backup_file" | while read file; do
log_success " Found: $file" log_success " Found: $file"
else
log_warning "Missing: $file"
fi
done done
return 0
else
log_error "Archive integrity check failed"
return 1
fi
}
# Create backup of current Plex data
@@ -97,37 +107,56 @@ backup_current_data() {
# Restore files from backup
restore_files() {
local backup_date="$1" local backup_file="$1"
local dry_run="$2" local dry_run="$2"
local backup_dir="$BACKUP_ROOT/$backup_date"
if [ "$dry_run" = "true" ]; then if [ ! -f "$backup_file" ]; then
log_message "DRY RUN: Would restore the following files:" log_error "Backup file not found: $backup_file"
else return 1
log_message "Restoring files from backup $backup_date..."
fi fi
for file in "${!RESTORE_LOCATIONS[@]}"; do # Create temporary extraction directory
local backup_file="$backup_dir/$file" local temp_dir="/tmp/plex-restore-$(date +%Y%m%d_%H%M%S)"
local restore_location="${RESTORE_LOCATIONS[$file]}$file" mkdir -p "$temp_dir"
if [ -f "$backup_file" ]; then log_message "Extracting backup archive..."
if [ "$dry_run" = "true" ]; then if ! tar -xzf "$backup_file" -C "$temp_dir"; then
echo " $backup_file -> $restore_location" log_error "Failed to extract backup archive"
rm -rf "$temp_dir"
return 1
fi
log_message "Restoring files..."
local restore_errors=0
for file in "${!RESTORE_LOCATIONS[@]}"; do
local src_file="$temp_dir/$file"
local dest_path="${RESTORE_LOCATIONS[$file]}"
local dest_file="$dest_path$file"
if [ -f "$src_file" ]; then
if [ "$dry_run" == "true" ]; then
log_message "Would restore: $file to $dest_file"
else
log_message "Restoring: $file"
if sudo cp "$backup_file" "$restore_location"; then if sudo cp "$src_file" "$dest_file"; then
sudo chown plex:plex "$restore_location" sudo chown plex:plex "$dest_file"
log_success "Restored: $file" log_success "Restored: $file"
else else
log_error "Failed to restore: $file" log_error "Failed to restore: $file"
return 1 restore_errors=$((restore_errors + 1))
fi fi
fi fi
else else
log_warning "Backup file not found: $backup_file" log_warning "File not found in backup: $file"
restore_errors=$((restore_errors + 1))
fi fi
done done
# Clean up temporary directory
rm -rf "$temp_dir"
return $restore_errors
}
# Manage Plex service
@@ -151,7 +180,7 @@ manage_plex_service() {
# Main function
main() {
local backup_date="$1" local backup_file="$1"
local dry_run=false
# Check for dry-run flag
@@ -159,30 +188,36 @@ main() {
dry_run=true dry_run=true
fi fi
# If no backup date provided, list available backups # If no backup file provided, list available backups
if [ -z "$backup_date" ] || [ "$backup_date" = "--dry-run" ]; then if [ -z "$backup_file" ] || [ "$backup_file" = "--dry-run" ]; then
list_backups
echo
echo "Usage: $0 <backup_date> [--dry-run]" echo "Usage: $0 <backup_file> [--dry-run]"
echo "Example: $0 20250125" echo "Example: $0 plex-backup-20250125_143022.tar.gz"
echo " $0 /mnt/share/media/backups/plex/plex-backup-20250125_143022.tar.gz"
exit 0 exit 0
fi fi
# If relative path, prepend BACKUP_ROOT
if [[ "$backup_file" != /* ]]; then
backup_file="$BACKUP_ROOT/$backup_file"
fi
# Validate backup exists and is complete
if ! validate_backup "$backup_date"; then if ! validate_backup "$backup_file"; then
log_error "Backup validation failed" log_error "Backup validation failed"
exit 1 exit 1
fi fi
if [ "$dry_run" = "true" ]; then if [ "$dry_run" = "true" ]; then
restore_files "$backup_date" true restore_files "$backup_file" true
log_message "Dry run completed. No changes were made." log_message "Dry run completed. No changes were made."
exit 0 exit 0
fi fi
# Confirm restoration # Confirm restoration
echo echo
log_warning "This will restore Plex data from backup $backup_date" log_warning "This will restore Plex data from backup $(basename "$backup_file")"
log_warning "Current Plex data will be backed up before restoration" log_warning "Current Plex data will be backed up before restoration"
read -p "Continue? (y/N): " -n 1 -r read -p "Continue? (y/N): " -n 1 -r
echo echo
@@ -204,7 +239,7 @@ main() {
fi
# Restore files
if restore_files "$backup_date" false; then if restore_files "$backup_file" false; then
log_success "Restoration completed successfully" log_success "Restoration completed successfully"
log_message "Current data backup saved at: $current_backup" log_message "Current data backup saved at: $current_backup"
else else

@@ -54,11 +54,11 @@ validate_backup_structure() {
return 1 return 1
fi fi
local backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" | wc -l) local backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
log_info "Found $backup_count backup directories" log_info "Found $backup_count backup files"
if [ "$backup_count" -eq 0 ]; then if [ "$backup_count" -eq 0 ]; then
log_warning "No backup directories found" log_warning "No backup files found"
return 1 return 1
fi fi
@@ -67,32 +67,31 @@ validate_backup_structure() {
# Validate individual backup
validate_backup() {
local backup_dir="$1" local backup_file="$1"
local backup_date=$(basename "$backup_dir") local backup_name=$(basename "$backup_file")
local errors=0 local errors=0
log_info "Validating backup: $backup_date" log_info "Validating backup: $backup_name"
# Check if directory exists and is readable # Check if file exists and is readable
if [ ! -d "$backup_dir" ] || [ ! -r "$backup_dir" ]; then if [ ! -f "$backup_file" ] || [ ! -r "$backup_file" ]; then
log_error "Backup directory not accessible: $backup_dir" log_error "Backup file not accessible: $backup_file"
return 1 return 1
fi fi
# Check for expected files # Test archive integrity
for file in "${EXPECTED_FILES[@]}"; do if ! tar -tzf "$backup_file" >/dev/null 2>&1; then
local file_path="$backup_dir/$file" log_error "Archive integrity check failed: $backup_name"
if [ -f "$file_path" ]; then
# Check file size
local size=$(stat -c%s "$file_path")
if [ "$size" -gt 0 ]; then
local human_size=$(du -h "$file_path" | cut -f1)
log_success " $file ($human_size)"
else
log_error " $file is empty"
errors=$((errors + 1)) errors=$((errors + 1))
fi else
log_success "Archive integrity check passed: $backup_name"
# Check for expected files in archive
local archive_contents=$(tar -tzf "$backup_file" 2>/dev/null)
for file in "${EXPECTED_FILES[@]}"; do
if echo "$archive_contents" | grep -q "^$file$"; then
log_success " Found: $file"
else
log_error " Missing file: $file"
errors=$((errors + 1))
@@ -100,15 +99,9 @@ validate_backup() {
done
# Check for unexpected files
local file_count=$(ls -1 "$backup_dir" | wc -l) echo "$archive_contents" | while IFS= read -r line; do
local expected_count=${#EXPECTED_FILES[@]} if [[ ! " ${EXPECTED_FILES[@]} " =~ " ${line} " ]]; then
log_warning " Unexpected file: $line"
if [ "$file_count" -ne "$expected_count" ]; then
log_warning " Expected $expected_count files, found $file_count"
ls -la "$backup_dir" | grep -v "^total" | grep -v "^d" | while read line; do
local filename=$(echo "$line" | awk '{print $9}')
if [[ ! " ${EXPECTED_FILES[@]} " =~ " ${filename} " ]]; then
log_warning " Unexpected file: $filename"
fi fi
done done
fi fi
@@ -120,14 +113,16 @@ validate_backup() {
check_backup_freshness() {
log_info "Checking backup freshness..."
local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" | sort | tail -1) local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort | tail -1)
if [ -z "$latest_backup" ]; then if [ -z "$latest_backup" ]; then
log_error "No backups found" log_error "No backups found"
return 1 return 1
fi fi
local backup_date=$(basename "$latest_backup") local backup_filename=$(basename "$latest_backup")
# Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
local backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
local backup_timestamp=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s)
local current_timestamp=$(date +%s)
local age_days=$(( (current_timestamp - backup_timestamp) / 86400 ))
@@ -205,9 +200,9 @@ generate_report() {
echo "==================================" >> "$REPORT_FILE" echo "==================================" >> "$REPORT_FILE"
# Validate each backup # Validate each backup
find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" | sort | while read backup_dir; do find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort | while read backup_file; do
total_backups=$((total_backups + 1)) total_backups=$((total_backups + 1))
validate_backup "$backup_dir" validate_backup "$backup_file"
local backup_errors=$?
if [ "$backup_errors" -eq 0 ]; then
@@ -239,13 +234,13 @@ fix_issues() {
log_success "JSON log file created/fixed" log_success "JSON log file created/fixed"
fi fi
# Remove empty backup directories # Clean up any remaining dated directories from old backup structure
find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" -empty -delete 2>/dev/null || true find "$BACKUP_ROOT" -maxdepth 1 -type d -name "????????" -exec rm -rf {} \; 2>/dev/null || true
# Fix permissions if needed
if [ -d "$BACKUP_ROOT" ]; then
chmod 755 "$BACKUP_ROOT"
find "$BACKUP_ROOT" -type f -exec chmod 644 {} \; 2>/dev/null || true find "$BACKUP_ROOT" -type f -name "plex-backup-*.tar.gz" -exec chmod 644 {} \; 2>/dev/null || true
log_success "Fixed backup permissions" log_success "Fixed backup permissions"
fi fi
} }