mirror of
https://github.com/acedanger/shell.git
synced 2025-12-06 01:10:12 -08:00
Remove obsolete backup and validation scripts for Plex Media Server
- Deleted `restore-plex.sh.sc2162_backup`: a comprehensive restoration script for Plex Media Server backups, including validation and logging features.
- Deleted `test-plex-backup.sh.sc2086_backup`: a comprehensive test suite for the Plex backup system, covering unit tests, integration tests, and performance benchmarks.
- Deleted `validate-plex-recovery.sh.sc2086_backup`: a script that validated the success of Plex database recovery operations, ensuring database integrity and service functionality.
This commit is contained in:
@@ -1,526 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
# Plex Backup System Integration Test Suite
|
|
||||||
################################################################################
|
|
||||||
#
|
|
||||||
# Author: Peter Wood <peter@peterwood.dev>
|
|
||||||
# Description: End-to-end integration testing framework for the complete Plex
|
|
||||||
# backup ecosystem. Tests backup, restoration, validation, and
|
|
||||||
# monitoring systems in controlled environments without affecting
|
|
||||||
# production Plex installations.
|
|
||||||
#
|
|
||||||
# Features:
|
|
||||||
# - Full workflow integration testing
|
|
||||||
# - Isolated test environment creation
|
|
||||||
# - Production-safe testing procedures
|
|
||||||
# - Multi-scenario testing (normal, error, edge cases)
|
|
||||||
# - Performance benchmarking under load
|
|
||||||
# - Service integration validation
|
|
||||||
# - Cross-script compatibility testing
|
|
||||||
#
|
|
||||||
# Related Scripts:
|
|
||||||
# - backup-plex.sh: Primary backup system under test
|
|
||||||
# - restore-plex.sh: Restoration workflow testing
|
|
||||||
# - validate-plex-backups.sh: Validation system testing
|
|
||||||
# - monitor-plex-backup.sh: Monitoring integration
|
|
||||||
# - test-plex-backup.sh: Unit testing complement
|
|
||||||
# - plex.sh: Service management integration
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# ./integration-test-plex.sh # Full integration test suite
|
|
||||||
# ./integration-test-plex.sh --quick # Quick smoke tests
|
|
||||||
# ./integration-test-plex.sh --performance # Performance benchmarks
|
|
||||||
# ./integration-test-plex.sh --cleanup # Clean test artifacts
|
|
||||||
#
|
|
||||||
# Dependencies:
|
|
||||||
# - All Plex backup scripts in this directory
|
|
||||||
# - sqlite3 or Plex SQLite binary
|
|
||||||
# - Temporary filesystem space (for test environments)
|
|
||||||
# - systemctl (for service testing scenarios)
|
|
||||||
#
|
|
||||||
# Exit Codes:
|
|
||||||
# 0 - All integration tests passed
|
|
||||||
# 1 - General error
|
|
||||||
# 2 - Integration test failures
|
|
||||||
# 3 - Test environment setup failure
|
|
||||||
# 4 - Performance benchmarks failed
|
|
||||||
#
|
|
||||||
################################################################################
|
|
||||||
|
|
||||||
# Plex Backup Integration Test Suite
|
|
||||||
# This script tests the enhanced backup features in a controlled environment
|
|
||||||
# without affecting production Plex installation
|
|
||||||
|
|
||||||
set -e

# Color codes for terminal output. Marked readonly: they are constants and
# must never be reassigned by later code.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly NC='\033[0m' # No Color

# Test configuration: every artifact lives under a unique throwaway TEST_DIR
# so runs cannot collide or touch a production Plex install.
readonly SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
readonly TEST_DIR="/tmp/plex-integration-test-$(date +%s)"
readonly BACKUP_SCRIPT="$SCRIPT_DIR/backup-plex.sh"

# Global test counters, mutated by the log_pass/log_fail helpers.
INTEGRATION_TEST_FUNCTIONS=0
INTEGRATION_ASSERTIONS_PASSED=0
INTEGRATION_ASSERTIONS_FAILED=0
declare -a FAILED_INTEGRATION_TESTS=()
|
|
||||||
|
|
||||||
# Logging functions
|
|
||||||
# Print a cyan "[INTEGRATION <timestamp>]" banner followed by the message.
log_test() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${CYAN}[INTEGRATION ${ts}]${NC} $1"
}
|
|
||||||
|
|
||||||
# Print a green PASS line and bump the global pass counter.
log_pass() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${GREEN}[PASS ${ts}]${NC} $1"
    (( ++INTEGRATION_ASSERTIONS_PASSED ))
}
|
|
||||||
|
|
||||||
# Print a red FAIL line, bump the failure counter, and record the message so
# the final report can list every failed assertion.
log_fail() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${RED}[FAIL ${ts}]${NC} $1"
    (( ++INTEGRATION_ASSERTIONS_FAILED ))
    FAILED_INTEGRATION_TESTS+=("$1")
}
|
|
||||||
|
|
||||||
# Print a blue informational line with a timestamp.
log_info() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${BLUE}[INFO ${ts}]${NC} $1"
}
|
|
||||||
|
|
||||||
# Print a yellow warning line with a timestamp.
log_warn() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${YELLOW}[WARN ${ts}]${NC} $1"
}
|
|
||||||
|
|
||||||
# Setup integration test environment
|
|
||||||
# Build the isolated sandbox used by every integration test: directory
# skeleton, mock Plex databases, preferences, and WAL/SHM companion files,
# all under $TEST_DIR.
setup_integration_environment() {
    log_info "Setting up integration test environment"

    # Directory skeleton (mkdir -p also creates $TEST_DIR itself).
    local subdir
    for subdir in mock_plex_data backup_destination logs; do
        mkdir -p "$TEST_DIR/$subdir"
    done

    # Realistic SQLite fixtures standing in for the production databases.
    create_mock_database "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db"
    create_mock_database "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.blobs.db"

    create_mock_preferences "$TEST_DIR/mock_plex_data/Preferences.xml"

    # Companion WAL/SHM files so the WAL-handling code path gets exercised.
    echo "WAL data simulation" > "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db-wal"
    echo "SHM data simulation" > "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db-shm"

    log_info "Integration test environment ready"
}
|
|
||||||
|
|
||||||
# Create mock SQLite database for testing
|
|
||||||
# Populate $1 with a small but structurally realistic Plex library database:
# library_sections and metadata_items tables, seed rows, and two indexes.
# Uses plain sqlite3 (not the Plex SQLite binary) — this is test-only data.
create_mock_database() {
    local target="$1"

    sqlite3 "$target" << 'EOF'
CREATE TABLE library_sections (
    id INTEGER PRIMARY KEY,
    name TEXT,
    type INTEGER,
    agent TEXT
);

INSERT INTO library_sections (name, type, agent) VALUES
    ('Movies', 1, 'com.plexapp.agents.imdb'),
    ('TV Shows', 2, 'com.plexapp.agents.thetvdb'),
    ('Music', 8, 'com.plexapp.agents.lastfm');

CREATE TABLE metadata_items (
    id INTEGER PRIMARY KEY,
    title TEXT,
    year INTEGER,
    added_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

INSERT INTO metadata_items (title, year) VALUES
    ('Test Movie', 2023),
    ('Another Movie', 2024),
    ('Test Show', 2022);

-- Add some indexes to make it more realistic
CREATE INDEX idx_metadata_title ON metadata_items(title);
CREATE INDEX idx_library_sections_type ON library_sections(type);
EOF

    log_info "Created mock database: $(basename "$target")"
}
|
|
||||||
|
|
||||||
# Create mock Preferences.xml
|
|
||||||
# Write a minimal Preferences.xml to $1 carrying the machine identifiers a
# backup of a real server would contain.
create_mock_preferences() {
    local target="$1"

    cat > "$target" << 'EOF'
<?xml version="1.0" encoding="utf-8"?>
<Preferences OldestPreviousVersion="1.32.8.7639-fb6452ebf" MachineIdentifier="test-machine-12345" ProcessedMachineIdentifier="test-processed-12345" AnonymousMachineIdentifier="test-anon-12345" FriendlyName="Test Plex Server" ManualPortMappingMode="1" TranscoderTempDirectory="/tmp" />
EOF

    log_info "Created mock preferences file"
}
|
|
||||||
|
|
||||||
# Test command line argument parsing
|
|
||||||
# Verify the backup script's CLI surface: --help must print usage text and an
# unknown flag must be rejected with a non-zero exit.
test_command_line_parsing() {
    INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
    log_test "Command Line Argument Parsing"

    # --help must advertise usage information.
    if ! "$BACKUP_SCRIPT" --help | grep -q "Usage:"; then
        log_fail "Help output test failed"
        return 1
    fi
    log_pass "Help output is functional"

    # An unrecognized flag must be refused.
    if "$BACKUP_SCRIPT" --invalid-option >/dev/null 2>&1; then
        log_fail "Invalid argument handling test failed"
        return 1
    fi
    log_pass "Invalid argument handling works correctly"
}
|
|
||||||
|
|
||||||
# Test performance monitoring features
|
|
||||||
# Exercise the JSON performance log: time a short operation, append a
# jq-built record via a temp file, and confirm the log contains one entry.
test_performance_monitoring() {
    INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
    log_test "Performance Monitoring Features"

    local perf_log="$TEST_DIR/test-performance.json"

    # Start from an empty JSON array.
    echo "[]" > "$perf_log"

    # Time a ~1s dummy operation.
    local started finished elapsed
    started=$(date +%s)
    sleep 1
    finished=$(date +%s)
    elapsed=$((finished - started))

    # Build one structured performance record.
    local record
    record=$(jq -n \
        --arg operation "integration_test" \
        --arg duration "$elapsed" \
        --arg timestamp "$(date -Iseconds)" \
        '{
            operation: $operation,
            duration_seconds: ($duration | tonumber),
            timestamp: $timestamp
        }')

    # Append via a temp file so a failed jq never truncates the log.
    jq --argjson entry "$record" '. += [$entry]' "$perf_log" > "${perf_log}.tmp" && \
        mv "${perf_log}.tmp" "$perf_log"

    if [ "$(jq length "$perf_log")" -eq 1 ]; then
        log_pass "Performance monitoring integration works"
    else
        log_fail "Performance monitoring integration failed"
        return 1
    fi
}
|
|
||||||
|
|
||||||
# Test notification system with mock endpoints
|
|
||||||
# Drive the webhook notification path against a stub sender and verify the
# URL/payload were captured in the log file.
test_notification_system() {
    INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
    log_test "Notification System Integration"

    local capture_log="$TEST_DIR/webhook_test.log"

    # Stand-in for the real webhook sender: records what it would have sent.
    test_send_webhook() {
        local url="$1"
        local payload="$2"
        echo "Webhook URL: $url" > "$capture_log"
        echo "Payload: $payload" >> "$capture_log"
        return 0
    }

    if ! test_send_webhook "https://example.com/webhook" '{"test": "data"}'; then
        log_fail "Webhook notification test failed"
        return 1
    fi

    if [ -f "$capture_log" ] && grep -q "Webhook URL" "$capture_log"; then
        log_pass "Webhook notification integration works"
    else
        log_fail "Webhook notification integration failed"
        return 1
    fi
}
|
|
||||||
|
|
||||||
# Test backup validation system
|
|
||||||
# Simulate a dated backup directory and confirm all three critical files
# (both databases plus Preferences.xml) are present. Skips with a warning if
# the real validation script is absent.
test_backup_validation() {
    INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
    log_test "Backup Validation System"

    local backup_dir="$TEST_DIR/test_backup_20250525"
    mkdir -p "$backup_dir"

    # Stage the fixture files as a fake backup set.
    cp "$TEST_DIR/mock_plex_data/"*.db "$backup_dir/"
    cp "$TEST_DIR/mock_plex_data/Preferences.xml" "$backup_dir/"

    if [ -f "$SCRIPT_DIR/validate-plex-backups.sh" ]; then
        # Stand-in validation: count the files a real backup must contain.
        local found=0
        local required
        for required in com.plexapp.plugins.library.db com.plexapp.plugins.library.blobs.db Preferences.xml; do
            if [ -f "$backup_dir/$required" ]; then
                found=$((found + 1))
            fi
        done

        if [ "$found" -eq 3 ]; then
            log_pass "Backup validation system works"
        else
            log_fail "Backup validation system failed - missing files"
            return 1
        fi
    else
        log_warn "Validation script not found, skipping test"
    fi
}
|
|
||||||
|
|
||||||
# Test database integrity checking
|
|
||||||
# Confirm PRAGMA integrity_check reports "ok" for a healthy fixture database
# and does NOT report "ok" for a deliberately corrupted file.
test_database_integrity_checking() {
    INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
    log_test "Database Integrity Checking"

    local healthy_db="$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db"

    # Plain sqlite3 stands in for the Plex SQLite binary in tests.
    if sqlite3 "$healthy_db" "PRAGMA integrity_check;" | grep -q "ok"; then
        log_pass "Database integrity checking works for valid database"
    else
        log_fail "Database integrity checking failed for valid database"
        return 1
    fi

    # A text file masquerading as a database must not pass the check.
    local bogus_db="$TEST_DIR/corrupted.db"
    echo "This is not a valid SQLite database" > "$bogus_db"

    if ! sqlite3 "$bogus_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
        log_pass "Database integrity checking correctly detects corruption"
    else
        log_fail "Database integrity checking failed to detect corruption"
        return 1
    fi
}
|
|
||||||
|
|
||||||
# Test parallel processing capabilities
|
|
||||||
# Fan out several background jobs, wait on each PID, and verify that every
# job both exited 0 and left its result file behind.
test_parallel_processing() {
    INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
    log_test "Parallel Processing Capabilities"

    local work_dir
    work_dir=$(mktemp -d)
    local -a job_pids=()
    local job_total=3
    local job_done=0

    # Launch the workers; each writes a marker file when finished.
    local i
    for i in $(seq 1 $job_total); do
        (
            sleep 0.$i   # stagger completion times
            echo "Job $i completed" > "$work_dir/job_$i.result"
        ) &
        job_pids+=($!)
    done

    # Reap each worker individually so per-job exit codes are observed.
    local pid
    for pid in "${job_pids[@]}"; do
        if wait "$pid"; then
            job_done=$((job_done + 1))
        fi
    done

    local results
    results=$(find "$work_dir" -name "job_*.result" | wc -l)

    rm -rf "$work_dir"

    if [ "$job_done" -eq "$job_total" ] && [ "$results" -eq "$job_total" ]; then
        log_pass "Parallel processing works correctly"
    else
        log_fail "Parallel processing test failed"
        return 1
    fi
}
|
|
||||||
|
|
||||||
# Test checksum caching system
|
|
||||||
# Verify the md5-sidecar caching scheme: a cache file at least as new as its
# source is trusted, and the cached value matches a fresh checksum.
test_checksum_caching() {
    INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
    log_test "Checksum Caching System"

    local subject="$TEST_DIR/checksum_test.txt"
    local sidecar="${subject}.md5"

    echo "checksum test content" > "$subject"

    # Compute and cache the checksum.
    local fresh_sum
    fresh_sum=$(md5sum "$subject" | cut -d' ' -f1)
    echo "$fresh_sum" > "$sidecar"

    # A valid cache must be no older than the file it describes.
    local subject_mtime sidecar_mtime
    subject_mtime=$(stat -c %Y "$subject")
    sidecar_mtime=$(stat -c %Y "$sidecar")

    if [ "$sidecar_mtime" -lt "$subject_mtime" ]; then
        log_fail "Checksum caching system failed - cache timing issue"
        return 1
    fi

    if [ "$(cat "$sidecar")" = "$fresh_sum" ]; then
        log_pass "Checksum caching system works correctly"
    else
        log_fail "Checksum caching system failed - checksum mismatch"
        return 1
    fi
}
|
|
||||||
|
|
||||||
# Test WAL file handling
|
|
||||||
# Exercise WAL/SHM sidecar handling. In the mock environment every branch is
# reported as a pass — the fake WAL files are not real SQLite WAL data, so a
# failed checkpoint is expected and acceptable here.
test_wal_file_handling() {
    INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
    log_test "WAL File Handling"

    local db="$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db"
    local wal="${db}-wal"
    local shm="${db}-shm"

    if [ ! -f "$wal" ] || [ ! -f "$shm" ]; then
        log_pass "WAL file handling test completed (no WAL files in mock)"
        return 0
    fi

    # Try a real checkpoint; in the mock setup this usually fails harmlessly.
    if sqlite3 "$db" "PRAGMA wal_checkpoint(FULL);" 2>/dev/null; then
        log_pass "WAL file handling works correctly"
    else
        log_pass "WAL checkpoint simulation completed (mock environment)"
    fi
}
|
|
||||||
|
|
||||||
# Cleanup integration test environment
|
|
||||||
# Remove the sandbox created by setup_integration_environment.
# Registered as the EXIT/SIGINT/SIGTERM trap by main(), so it runs on every
# exit path.
cleanup_integration_environment() {
    if [ -d "$TEST_DIR" ]; then
        log_info "Cleaning up integration test environment"
        # ${TEST_DIR:?} aborts instead of expanding empty if the variable is
        # ever unset, so this can never degrade into 'rm -rf ""' or worse.
        rm -rf -- "${TEST_DIR:?}"
    fi
}
|
|
||||||
|
|
||||||
# Generate integration test report
|
|
||||||
# Print the final summary to stdout: counters, the list of failed assertions,
# an integer success percentage, and follow-up guidance.
# Reads:  INTEGRATION_TEST_FUNCTIONS, INTEGRATION_ASSERTIONS_PASSED,
#         INTEGRATION_ASSERTIONS_FAILED, FAILED_INTEGRATION_TESTS.
# Note:   the closing log_pass/log_fail calls also bump the counters they
#         report on — harmless, since the report has already been printed.
generate_integration_report() {
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')

    echo
    echo "=================================================="
    echo "         PLEX BACKUP INTEGRATION TEST REPORT"
    echo "=================================================="
    echo "Test Run: $timestamp"
    echo "Test Functions: $INTEGRATION_TEST_FUNCTIONS"
    echo "Total Assertions: $((INTEGRATION_ASSERTIONS_PASSED + INTEGRATION_ASSERTIONS_FAILED))"
    echo "Assertions Passed: $INTEGRATION_ASSERTIONS_PASSED"
    echo "Assertions Failed: $INTEGRATION_ASSERTIONS_FAILED"
    echo

    # Enumerate every recorded failure so nothing gets lost in the scrollback.
    if [ $INTEGRATION_ASSERTIONS_FAILED -gt 0 ]; then
        echo "FAILED ASSERTIONS:"
        for failed_test in "${FAILED_INTEGRATION_TESTS[@]}"; do
            echo "  - $failed_test"
        done
        echo
    fi

    # Integer percentage; guarded so an empty run cannot divide by zero.
    local success_rate=0
    local total_assertions=$((INTEGRATION_ASSERTIONS_PASSED + INTEGRATION_ASSERTIONS_FAILED))
    if [ $total_assertions -gt 0 ]; then
        success_rate=$(( (INTEGRATION_ASSERTIONS_PASSED * 100) / total_assertions ))
    fi

    echo "Success Rate: ${success_rate}%"
    echo

    if [ $INTEGRATION_ASSERTIONS_FAILED -eq 0 ]; then
        log_pass "All integration tests passed successfully!"
        echo
        echo "✅ The enhanced Plex backup system is ready for production use!"
        echo
        echo "Next Steps:"
        echo "  1. Test with real webhook endpoints if using webhook notifications"
        echo "  2. Test email notifications with configured sendmail"
        echo "  3. Run a test backup in a non-production environment"
        echo "  4. Set up automated backup scheduling with cron"
        echo "  5. Monitor performance logs for optimization opportunities"
    else
        log_fail "Some integration tests failed - review output above"
    fi
}
|
|
||||||
|
|
||||||
# Main execution
|
|
||||||
# Entry point: verify the backup script exists, build the sandbox, run every
# integration test, print the report, and exit 0 only if nothing failed.
main() {
    log_info "Starting Plex Backup Integration Tests"

    # The suite is meaningless without the script under test.
    if [ ! -f "$BACKUP_SCRIPT" ]; then
        log_fail "Backup script not found: $BACKUP_SCRIPT"
        exit 1
    fi

    setup_integration_environment

    # Guarantee sandbox removal on every exit path.
    trap cleanup_integration_environment EXIT SIGINT SIGTERM

    # Run the suite in order; each entry is a test function defined above.
    local test_fn
    for test_fn in \
        test_command_line_parsing \
        test_performance_monitoring \
        test_notification_system \
        test_backup_validation \
        test_database_integrity_checking \
        test_parallel_processing \
        test_checksum_caching \
        test_wal_file_handling; do
        "$test_fn"
    done

    generate_integration_report

    # Exit code mirrors the failure counter.
    if [ $INTEGRATION_ASSERTIONS_FAILED -eq 0 ]; then
        exit 0
    fi
    exit 1
}

# Run main function
main "$@"
|
|
||||||
@@ -1,701 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
# Advanced Plex Database Recovery Script
|
|
||||||
################################################################################
|
|
||||||
#
|
|
||||||
# Author: Peter Wood <peter@peterwood.dev>
|
|
||||||
# Description: Advanced database recovery script with multiple repair strategies
|
|
||||||
# for corrupted Plex databases. Implements progressive recovery
|
|
||||||
# techniques from gentle repairs to aggressive reconstruction
|
|
||||||
# methods, with comprehensive logging and rollback capabilities.
|
|
||||||
#
|
|
||||||
# Features:
|
|
||||||
# - Progressive recovery strategy (gentle to aggressive)
|
|
||||||
# - Multiple repair techniques (VACUUM, dump/restore, rebuild)
|
|
||||||
# - Automatic backup before any recovery attempts
|
|
||||||
# - Database integrity verification at each step
|
|
||||||
# - Rollback capability if recovery fails
|
|
||||||
# - Dry-run mode for safe testing
|
|
||||||
# - Comprehensive logging and reporting
|
|
||||||
#
|
|
||||||
# Related Scripts:
|
|
||||||
# - backup-plex.sh: Creates backups for recovery scenarios
|
|
||||||
# - icu-aware-recovery.sh: ICU-specific recovery methods
|
|
||||||
# - nuclear-plex-recovery.sh: Last-resort complete replacement
|
|
||||||
# - validate-plex-recovery.sh: Validates recovery results
|
|
||||||
# - restore-plex.sh: Standard restoration from backups
|
|
||||||
# - plex.sh: General Plex service management
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# ./recover-plex-database.sh # Interactive recovery
|
|
||||||
# ./recover-plex-database.sh --auto # Automated recovery
|
|
||||||
# ./recover-plex-database.sh --dry-run # Show recovery plan
|
|
||||||
# ./recover-plex-database.sh --gentle # Gentle repair only
|
|
||||||
# ./recover-plex-database.sh --aggressive # Aggressive repair methods
|
|
||||||
#
|
|
||||||
# Dependencies:
|
|
||||||
# - sqlite3 or Plex SQLite binary
|
|
||||||
# - systemctl (for service management)
|
|
||||||
# - Sufficient disk space for backups and temp files
|
|
||||||
#
|
|
||||||
# Exit Codes:
|
|
||||||
# 0 - Recovery successful
|
|
||||||
# 1 - General error
|
|
||||||
# 2 - Database corruption beyond repair
|
|
||||||
# 3 - Service management failure
|
|
||||||
# 4 - Insufficient disk space
|
|
||||||
# 5 - Recovery partially successful (manual intervention needed)
|
|
||||||
#
|
|
||||||
################################################################################
|
|
||||||
|
|
||||||
# Advanced Plex Database Recovery Script
|
|
||||||
# Usage: ./recover-plex-database.sh [--auto] [--dry-run]
|
|
||||||
|
|
||||||
set -e

# Color codes for output. Readonly: constants, never reassigned.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly NC='\033[0m' # No Color

# Fixed locations for the Plex installation under recovery.
readonly SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
readonly PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
readonly MAIN_DB="com.plexapp.plugins.library.db"
readonly BLOBS_DB="com.plexapp.plugins.library.blobs.db"
readonly PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"
readonly BACKUP_SUFFIX="recovery-$(date +%Y%m%d_%H%M%S)"
readonly RECOVERY_LOG="$SCRIPT_DIR/logs/database-recovery-$(date +%Y%m%d_%H%M%S).log"

# Script options (mutable: set by the command-line parser)
AUTO_MODE=false
DRY_RUN=false

# Ensure logs directory exists before the first write to $RECOVERY_LOG
mkdir -p "$SCRIPT_DIR/logs"
|
|
||||||
|
|
||||||
# Logging function
|
|
||||||
# Emit a timestamped line to the console (honoring color escape codes) and
# append the same line, raw, to $RECOVERY_LOG.
log_message() {
    local stamped="[$(date '+%Y-%m-%d %H:%M:%S')] $1"
    echo -e "$stamped"
    echo "$stamped" >> "$RECOVERY_LOG"
}
|
|
||||||
|
|
||||||
# Severity-tagged one-line wrappers around log_message; colors come from the
# globals defined at the top of the script.
log_success() { log_message "${GREEN}SUCCESS: $1${NC}"; }

log_error()   { log_message "${RED}ERROR: $1${NC}"; }

log_warning() { log_message "${YELLOW}WARNING: $1${NC}"; }

log_info()    { log_message "${BLUE}INFO: $1${NC}"; }
|
|
||||||
|
|
||||||
# Parse command line arguments
|
|
||||||
# ---- Command-line parsing (runs at top level, before any recovery work) ----
# Recognized flags:
#   --auto      run every recovery method without interactive prompts
#   --dry-run   log intended actions only; mutating steps are skipped
#   -h/--help   print usage and exit 0
# Any other argument is fatal (exit 1 via log_error).
while [[ $# -gt 0 ]]; do
    case $1 in
        --auto)
            AUTO_MODE=true
            shift
            ;;
        --dry-run)
            DRY_RUN=true
            shift
            ;;
        -h|--help)
            echo "Usage: $0 [--auto] [--dry-run] [--help]"
            echo ""
            echo "Options:"
            echo "  --auto     Automatically attempt all recovery methods without prompts"
            echo "  --dry-run  Show what would be done without making changes"
            echo "  --help     Show this help message"
            echo ""
            echo "Recovery Methods (in order):"
            echo "  1. SQLite .recover command (modern SQLite recovery)"
            echo "  2. Partial table extraction with LIMIT"
            echo "  3. Emergency data extraction"
            echo "  4. Backup restoration from most recent good backup"
            echo ""
            exit 0
            ;;
        *)
            log_error "Unknown option: $1"
            exit 1
            ;;
    esac
done
|
|
||||||
|
|
||||||
# Check dependencies
|
|
||||||
# Verify the tools recovery relies on: the Plex SQLite binary and a standard
# sqlite3 on PATH. Also best-effort marks the Plex binary executable.
# Returns 1 if either dependency is missing.
check_dependencies() {
    log_info "Checking dependencies..."

    if [ ! -f "$PLEX_SQLITE" ]; then
        log_error "Plex SQLite binary not found at: $PLEX_SQLITE"
        return 1
    fi

    if ! command -v sqlite3 >/dev/null 2>&1; then
        log_error "Standard sqlite3 command not found"
        return 1
    fi

    # Best effort: the binary may already be executable or chmod may need
    # privileges we lack — neither case is fatal.
    sudo chmod +x "$PLEX_SQLITE" 2>/dev/null || true

    log_success "Dependencies check passed"
    return 0
}
|
|
||||||
|
|
||||||
# Stop Plex service safely
|
|
||||||
# Stop Plex Media Server and wait (up to 30s) for it to actually go down.
# Honors DRY_RUN (logs the intent and returns 0 without touching systemd).
# Returns 1 only if the service is still active after the timeout.
stop_plex_service() {
    log_info "Stopping Plex Media Server..."

    if [ "$DRY_RUN" = true ]; then
        log_info "DRY RUN: Would stop Plex service"
        return 0
    fi

    if sudo systemctl is-active --quiet plexmediaserver; then
        sudo systemctl stop plexmediaserver

        # Wait for service to fully stop — 'systemctl stop' can return before
        # the process has actually exited.
        local timeout=30
        while sudo systemctl is-active --quiet plexmediaserver && [ $timeout -gt 0 ]; do
            sleep 1
            timeout=$((timeout - 1))
        done

        if sudo systemctl is-active --quiet plexmediaserver; then
            log_error "Failed to stop Plex service within timeout"
            return 1
        fi

        log_success "Plex service stopped successfully"
    else
        log_info "Plex service was already stopped"
    fi

    return 0
}
|
|
||||||
|
|
||||||
# Start Plex service
|
|
||||||
# Start Plex Media Server and wait (up to 30s) for it to report active.
# Honors DRY_RUN. Unlike stop_plex_service, a start failure is only warned
# about — the function always returns the status of the final log call.
start_plex_service() {
    log_info "Starting Plex Media Server..."

    if [ "$DRY_RUN" = true ]; then
        log_info "DRY RUN: Would start Plex service"
        return 0
    fi

    sudo systemctl start plexmediaserver

    # Wait for service to start; poll once a second up to the timeout.
    local timeout=30
    while ! sudo systemctl is-active --quiet plexmediaserver && [ $timeout -gt 0 ]; do
        sleep 1
        timeout=$((timeout - 1))
    done

    if sudo systemctl is-active --quiet plexmediaserver; then
        log_success "Plex service started successfully"
    else
        log_warning "Plex service may not have started properly"
    fi
}
|
|
||||||
|
|
||||||
# Check database integrity
|
|
||||||
# Run PRAGMA integrity_check on $1 via the Plex SQLite binary.
# Arguments: $1 - path to the database file
# Returns:   0 when the check reports exactly "ok"; 1 when the file is
#            missing, the check cannot run, or issues are reported (each
#            issue line is logged as a warning).
check_database_integrity() {
    local db_file="$1"
    local db_name
    db_name=$(basename "$db_file")

    log_info "Checking integrity of $db_name..."

    if [ ! -f "$db_file" ]; then
        log_error "Database file not found: $db_file"
        return 1
    fi

    local integrity_result
    local check_exit_code=0
    # '|| check_exit_code=$?' is essential: under 'set -e' a failing command
    # substitution in a plain assignment would abort the whole script before
    # the exit code was ever examined, making the error branch unreachable.
    integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1) || check_exit_code=$?

    if [ $check_exit_code -ne 0 ]; then
        log_error "Failed to run integrity check on $db_name"
        return 1
    fi

    if echo "$integrity_result" | grep -q "^ok$"; then
        log_success "Database integrity check passed: $db_name"
        return 0
    else
        log_warning "Database integrity issues detected in $db_name:"
        # The pipeline runs the loop in a subshell; that is fine here because
        # the loop only logs and does not need to mutate outer variables.
        echo "$integrity_result" | while IFS= read -r line; do
            log_warning "  $line"
        done
        return 1
    fi
}
|
|
||||||
|
|
||||||
# Recovery Method 1: SQLite .recover command
|
|
||||||
#######################################
# Recovery Method 1: rebuild the database via SQLite's .recover command.
# Dumps whatever is salvageable to SQL, builds a fresh database from it,
# verifies its integrity, then swaps it in (original kept as *.corrupted).
# Uses plain sqlite3 rather than the Plex binary for the recovery itself.
# Globals:   DRY_RUN (read)
# Arguments: $1 - path to the corrupted database
# Returns:   0 on successful recovery; 1 on any failure (temp files removed)
#######################################
recovery_method_sqlite_recover() {
    local db_file="$1"
    local db_name=$(basename "$db_file")
    local recovered_sql="${db_file}.recovered.sql"
    local new_db="${db_file}.recovered"

    log_info "Recovery Method 1: SQLite .recover command for $db_name"

    if [ "$DRY_RUN" = true ]; then
        log_info "DRY RUN: Would attempt SQLite .recover method"
        return 0
    fi

    # .recover only exists in newer SQLite CLIs; probe the .help output
    # rather than guessing from a version string.
    if ! echo ".help" | sqlite3 2>/dev/null | grep -q "\.recover"; then
        log_warning "SQLite .recover command not available in this version"
        return 1
    fi

    log_info "Attempting SQLite .recover method..."

    # Use standard sqlite3 for .recover as it's more reliable
    if sqlite3 "$db_file" ".recover" > "$recovered_sql" 2>/dev/null; then
        log_success "Recovery SQL generated successfully"

        # Create new database from recovered data (only if the dump is
        # non-empty — an empty dump means nothing was salvaged).
        if [ -f "$recovered_sql" ] && [ -s "$recovered_sql" ]; then
            if sqlite3 "$new_db" < "$recovered_sql" 2>/dev/null; then
                log_success "New database created from recovered data"

                # Verify new database integrity before touching the original.
                if sqlite3 "$new_db" "PRAGMA integrity_check;" | grep -q "ok"; then
                    log_success "Recovered database integrity verified"

                    # Replace original with recovered database; the corrupted
                    # original is preserved as *.corrupted for forensics.
                    if sudo mv "$db_file" "${db_file}.corrupted" && sudo mv "$new_db" "$db_file"; then
                        sudo chown plex:plex "$db_file"
                        sudo chmod 644 "$db_file"
                        log_success "Database successfully recovered using .recover method"

                        # Clean up
                        rm -f "$recovered_sql"

                        return 0
                    else
                        log_error "Failed to replace original database"
                    fi
                else
                    log_error "Recovered database failed integrity check"
                fi
            else
                log_error "Failed to create database from recovered SQL"
            fi
        else
            log_error "Recovery SQL file is empty or not generated"
        fi
    else
        log_error "SQLite .recover command failed"
    fi

    # Clean up on failure — every unsuccessful branch above falls through
    # to here so partial artifacts never linger next to the live database.
    rm -f "$recovered_sql" "$new_db"
    return 1
}
|
|
||||||
|
|
||||||
# Recovery Method 2: Partial table extraction
|
|
||||||
# Recovery Method 2: partial table extraction.
# Extracts the schema plus row data (as INSERT statements) for the most
# important tables, capped at a row limit that shrinks on failure, then
# rebuilds a database from the partial dump and swaps it in.
# Globals:   DRY_RUN, PLEX_SQLITE (read)
# Arguments: $1 - path to the corrupted database file
# Returns:   0 on success (data loss likely), 1 on failure
recovery_method_partial_extraction() {
    local db_file="$1"
    local db_name
    db_name=$(basename "$db_file")
    local partial_sql="${db_file}.partial.sql"
    local new_db="${db_file}.partial"

    log_info "Recovery Method 2: Partial table extraction for $db_name"

    if [ "$DRY_RUN" = true ]; then
        log_info "DRY RUN: Would attempt partial extraction method"
        return 0
    fi

    log_info "Extracting schema and partial data..."

    # Start the SQL file with a header
    {
        echo "-- Partial recovery of $db_name"
        echo "-- Generated on $(date)"
        echo ""
    } > "$partial_sql"

    # Extract schema (Plex bundled sqlite first, stock sqlite3 as fallback)
    if sudo "$PLEX_SQLITE" "$db_file" ".schema" >> "$partial_sql" 2>/dev/null; then
        log_success "Schema extracted successfully"
    else
        log_warning "Schema extraction failed, trying alternative method"
        if sqlite3 "$db_file" ".schema" >> "$partial_sql" 2>/dev/null; then
            log_success "Schema extracted with standard sqlite3"
        else
            log_error "Schema extraction failed completely"
            rm -f "$partial_sql"
            return 1
        fi
    fi

    # Critical tables to extract (in order of importance)
    local critical_tables=(
        "accounts"
        "library_sections"
        "directories"
        "metadata_items"
        "media_items"
        "media_parts"
        "media_streams"
        "taggings"
        "tags"
    )

    log_info "Attempting to extract critical tables..."

    local table
    for table in "${critical_tables[@]}"; do
        log_info "Extracting table: $table"

        local extract_success=false
        local limit=10000

        # BUGFIX: the old loop tested only an upper bound (limit <= 100000)
        # while halving the limit on failure, so once limit reached 0 it
        # never terminated. Halving now runs until the limit drops below 1.
        while [ "$limit" -ge 1 ] && [ "$extract_success" = false ]; do
            if sudo "$PLEX_SQLITE" "$db_file" "SELECT COUNT(*) FROM $table;" >/dev/null 2>&1; then
                # Table exists and is readable
                {
                    echo ""
                    echo "-- Data for table $table (limited to $limit rows)"
                    echo "DELETE FROM $table;"
                } >> "$partial_sql"

                # BUGFIX: '.mode insert' must run in the SAME sqlite invocation
                # as the SELECT; running it as a separate invocation (as the
                # old code did) has no effect, so the SELECT emitted plain
                # rows instead of INSERT statements.
                if printf '.mode insert %s\nSELECT * FROM %s LIMIT %d;\n' \
                        "$table" "$table" "$limit" \
                        | sudo "$PLEX_SQLITE" "$db_file" >> "$partial_sql" 2>/dev/null; then
                    local row_count
                    row_count=$(grep -c "INSERT INTO $table" "$partial_sql")
                    log_success "Extracted $row_count rows from $table"
                    extract_success=true
                else
                    log_warning "Failed to extract $table with limit $limit, trying smaller limit"
                    limit=$((limit / 2))
                fi
            else
                log_warning "Table $table is not accessible or doesn't exist"
                break
            fi
        done

        if [ "$extract_success" = false ]; then
            log_warning "Could not extract any data from table $table"
        fi
    done

    # Create new database from partial data
    if [ -f "$partial_sql" ] && [ -s "$partial_sql" ]; then
        log_info "Creating database from partial extraction..."

        if sqlite3 "$new_db" < "$partial_sql" 2>/dev/null; then
            log_success "Partial database created successfully"

            # Verify basic functionality before swapping in
            if sqlite3 "$new_db" "PRAGMA integrity_check;" | grep -q "ok"; then
                log_success "Partial database integrity verified"

                # Replace original with partial database, keeping the corrupted copy
                if sudo mv "$db_file" "${db_file}.corrupted" && sudo mv "$new_db" "$db_file"; then
                    sudo chown plex:plex "$db_file"
                    sudo chmod 644 "$db_file"
                    log_success "Database partially recovered - some data may be lost"
                    log_warning "Please verify your Plex library after recovery"

                    rm -f "$partial_sql"
                    return 0
                else
                    log_error "Failed to replace original database"
                fi
            else
                log_error "Partial database failed integrity check"
            fi
        else
            log_error "Failed to create database from partial extraction"
        fi
    else
        log_error "Partial extraction SQL file is empty"
    fi

    # Clean up on failure
    rm -f "$partial_sql" "$new_db"
    return 1
}
|
|
||||||
|
|
||||||
# Recovery Method 3: Emergency data extraction
|
|
||||||
# Recovery Method 3: emergency minimal database.
# Builds a bare-bones database (accounts, library_sections, directories)
# and installs it; nearly all metadata is lost and libraries must be
# re-added and re-scanned. Prompts for confirmation unless AUTO_MODE.
# Globals:   DRY_RUN, AUTO_MODE (read)
# Arguments: $1 - path to the corrupted database file
# Returns:   0 on install, 1 on failure or user cancellation
recovery_method_emergency_extraction() {
    local db_file="$1"
    local db_name
    db_name=$(basename "$db_file")

    log_info "Recovery Method 3: Emergency data extraction for $db_name"

    if [ "$DRY_RUN" = true ]; then
        log_info "DRY RUN: Would attempt emergency extraction method"
        return 0
    fi

    log_warning "This method will create a minimal database with basic library structure"
    log_warning "You will likely need to re-scan your media libraries"

    if [ "$AUTO_MODE" = false ]; then
        read -p "Continue with emergency extraction? This will lose most metadata [y/N]: " -n 1 -r
        echo
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            log_info "Emergency extraction cancelled by user"
            return 1
        fi
    fi

    local emergency_db="${db_file}.emergency"

    # SECURITY: use mktemp rather than the old fixed, predictable
    # /tmp/emergency_schema.sql path (symlink-attack safe)
    local schema_file
    schema_file=$(mktemp /tmp/emergency_schema.XXXXXX) || {
        log_error "Failed to create temporary schema file"
        return 1
    }

    # Create a minimal database with essential tables
    log_info "Creating minimal emergency database..."

    cat > "$schema_file" << 'EOF'
-- Emergency Plex database schema (minimal)
CREATE TABLE accounts (
    id INTEGER PRIMARY KEY,
    name TEXT,
    hashed_password TEXT,
    salt TEXT,
    created_at DATETIME,
    updated_at DATETIME
);

CREATE TABLE library_sections (
    id INTEGER PRIMARY KEY,
    name TEXT,
    section_type INTEGER,
    agent TEXT,
    scanner TEXT,
    language TEXT,
    created_at DATETIME,
    updated_at DATETIME
);

CREATE TABLE directories (
    id INTEGER PRIMARY KEY,
    library_section_id INTEGER,
    path TEXT,
    created_at DATETIME,
    updated_at DATETIME
);

-- Insert default admin account
INSERT INTO accounts (id, name, created_at, updated_at)
VALUES (1, 'plex', datetime('now'), datetime('now'));
EOF

    if sqlite3 "$emergency_db" < "$schema_file" 2>/dev/null; then
        log_success "Emergency database created"

        # Replace original with emergency database, keeping the corrupted copy
        if sudo mv "$db_file" "${db_file}.corrupted" && sudo mv "$emergency_db" "$db_file"; then
            sudo chown plex:plex "$db_file"
            sudo chmod 644 "$db_file"
            log_success "Emergency database installed"
            log_warning "You will need to re-add library sections and re-scan media"

            rm -f "$schema_file"
            return 0
        else
            log_error "Failed to install emergency database"
        fi
    else
        log_error "Failed to create emergency database"
    fi

    # Clean up on failure
    rm -f "$schema_file" "$emergency_db"
    return 1
}
|
|
||||||
|
|
||||||
# Recovery Method 4: Restore from backup
|
|
||||||
# Recovery Method 4: restore the database from the most recent backup archive.
# Finds the newest plex-backup-*.tar.gz, verifies the contained database's
# integrity, and swaps it in place of the corrupted file.
# Globals:   DRY_RUN, AUTO_MODE (read)
# Arguments: $1 - path to the corrupted database file
# Returns:   0 on successful restore, 1 on failure or cancellation
recovery_method_backup_restore() {
    local db_file="$1"
    local backup_dir="/mnt/share/media/backups/plex"

    log_info "Recovery Method 4: Restore from most recent backup"

    if [ "$DRY_RUN" = true ]; then
        log_info "DRY RUN: Would attempt backup restoration"
        return 0
    fi

    # Timestamped names sort lexically; reverse sort puts the newest first.
    # Declaration split from assignment so the pipeline status isn't masked (SC2155).
    local latest_backup
    latest_backup=$(find "$backup_dir" -maxdepth 1 -name "plex-backup-*.tar.gz" -type f 2>/dev/null | sort -r | head -1)

    if [ -z "$latest_backup" ]; then
        log_error "No backup files found in $backup_dir"
        return 1
    fi

    log_info "Found latest backup: $(basename "$latest_backup")"

    if [ "$AUTO_MODE" = false ]; then
        read -p "Restore from backup $(basename "$latest_backup")? [y/N]: " -n 1 -r
        echo
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            log_info "Backup restoration cancelled by user"
            return 1
        fi
    fi

    # SECURITY: mktemp -d instead of the old predictable timestamped /tmp path
    local temp_extract
    temp_extract=$(mktemp -d /tmp/plex-recovery-extract-XXXXXX) || {
        log_error "Failed to create temporary extraction directory"
        return 1
    }

    log_info "Extracting backup..."
    if tar -xzf "$latest_backup" -C "$temp_extract" 2>/dev/null; then
        local backup_db_file
        backup_db_file="$temp_extract/$(basename "$db_file")"

        if [ -f "$backup_db_file" ]; then
            # Verify backup database integrity before using it
            if sqlite3 "$backup_db_file" "PRAGMA integrity_check;" | grep -q "ok"; then
                log_success "Backup database integrity verified"

                # Replace corrupted database with backup, keeping the corrupted copy
                if sudo mv "$db_file" "${db_file}.corrupted" && sudo cp "$backup_db_file" "$db_file"; then
                    sudo chown plex:plex "$db_file"
                    sudo chmod 644 "$db_file"
                    log_success "Database restored from backup"

                    rm -rf "$temp_extract"
                    return 0
                else
                    log_error "Failed to replace database with backup"
                fi
            else
                log_error "Backup database also has integrity issues"
            fi
        else
            log_error "Database file not found in backup"
        fi
    else
        log_error "Failed to extract backup"
    fi

    # Clean up on failure
    rm -rf "$temp_extract"
    return 1
}
|
|
||||||
|
|
||||||
# Main recovery function
|
|
||||||
# Orchestrate the recovery: stop Plex, snapshot the corrupted database,
# then try each recovery method in order of preference until one produces
# a database that passes an integrity check. Restores the snapshot between
# failed attempts so every method starts from the same corrupted state.
# Globals:   PLEX_DB_DIR, MAIN_DB, RECOVERY_LOG, DRY_RUN, BACKUP_SUFFIX (read)
# Exits:     0 on success (or no recovery needed), 1 on total failure
main_recovery() {
    # NOTE: the old code declared an unused local db_file here; every
    # operation below runs on $MAIN_DB relative to $PLEX_DB_DIR.
    log_info "Starting Plex database recovery process"
    log_info "Recovery log: $RECOVERY_LOG"

    # Check dependencies
    if ! check_dependencies; then
        exit 1
    fi

    # Stop Plex service
    if ! stop_plex_service; then
        exit 1
    fi

    # Change to database directory
    cd "$PLEX_DB_DIR" || {
        log_error "Failed to change to database directory"
        exit 1
    }

    # Check if database exists
    if [ ! -f "$MAIN_DB" ]; then
        log_error "Main database file not found: $MAIN_DB"
        exit 1
    fi

    # Create backup of current corrupted state so failed attempts can roll back
    log_info "Creating backup of current corrupted database..."
    if [ "$DRY_RUN" = false ]; then
        sudo cp "$MAIN_DB" "${MAIN_DB}.${BACKUP_SUFFIX}"
        log_success "Corrupted database backed up as: ${MAIN_DB}.${BACKUP_SUFFIX}"
    fi

    # Confirm the database is actually corrupted before doing anything drastic
    log_info "Verifying database corruption..."
    if check_database_integrity "$MAIN_DB"; then
        log_success "Database integrity check passed - no recovery needed!"
        start_plex_service
        exit 0
    fi

    log_warning "Database corruption confirmed, attempting recovery..."

    # Recovery methods in order of preference (least to most destructive)
    local recovery_methods=(
        "recovery_method_sqlite_recover"
        "recovery_method_partial_extraction"
        "recovery_method_emergency_extraction"
        "recovery_method_backup_restore"
    )

    local method
    for method in "${recovery_methods[@]}"; do
        log_info "Attempting: $method"

        if $method "$MAIN_DB"; then
            log_success "Recovery successful using: $method"

            # Verify the recovered database before declaring victory
            if check_database_integrity "$MAIN_DB"; then
                log_success "Recovered database integrity verified"
                start_plex_service
                log_success "Database recovery completed successfully!"
                log_info "Please check your Plex server and verify your libraries"
                exit 0
            else
                log_error "Recovered database still has integrity issues"
                # Restore the corrupted snapshot so the next method starts clean
                if [ "$DRY_RUN" = false ]; then
                    sudo cp "${MAIN_DB}.${BACKUP_SUFFIX}" "$MAIN_DB"
                fi
            fi
        else
            log_warning "Recovery method failed: $method"
        fi
    done

    log_error "All recovery methods failed"
    log_error "Manual intervention required"

    # Restore original corrupted database for manual inspection
    if [ "$DRY_RUN" = false ]; then
        sudo cp "${MAIN_DB}.${BACKUP_SUFFIX}" "$MAIN_DB"
    fi

    start_plex_service
    exit 1
}
|
|
||||||
|
|
||||||
# Safety net: bring Plex back up no matter which path the script exits on.
trap 'start_plex_service' EXIT

# Kick off the recovery workflow with the caller's arguments.
main_recovery "$@"
|
|
||||||
@@ -1,306 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
# Plex Media Server Backup Restoration Script
|
|
||||||
################################################################################
|
|
||||||
#
|
|
||||||
# Author: Peter Wood <peter@peterwood.dev>
|
|
||||||
# Description: Safe and reliable restoration script for Plex Media Server
|
|
||||||
# backups with validation, dry-run capability, and automatic
|
|
||||||
# backup of current data before restoration.
|
|
||||||
#
|
|
||||||
# Features:
|
|
||||||
# - Interactive backup selection from available archives
|
|
||||||
# - Backup validation before restoration
|
|
||||||
# - Dry-run mode for testing restoration process
|
|
||||||
# - Automatic backup of current data before restoration
|
|
||||||
# - Service management (stop/start Plex during restoration)
|
|
||||||
# - Comprehensive logging and error handling
|
|
||||||
# - File ownership and permission restoration
|
|
||||||
#
|
|
||||||
# Related Scripts:
|
|
||||||
# - backup-plex.sh: Creates backups that this script restores
|
|
||||||
# - validate-plex-backups.sh: Validates backup integrity
|
|
||||||
# - monitor-plex-backup.sh: Monitors backup system health
|
|
||||||
# - test-plex-backup.sh: Tests backup/restore operations
|
|
||||||
# - plex.sh: General Plex service management
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# ./restore-plex.sh # List available backups
|
|
||||||
# ./restore-plex.sh plex-backup-20250125_143022.tar.gz # Restore specific backup
|
|
||||||
# ./restore-plex.sh --dry-run backup-file.tar.gz # Test restoration process
|
|
||||||
# ./restore-plex.sh --list # List all available backups
|
|
||||||
#
|
|
||||||
# Dependencies:
|
|
||||||
# - tar (for archive extraction)
|
|
||||||
# - Plex Media Server
|
|
||||||
# - systemctl (for service management)
|
|
||||||
# - Access to backup directory
|
|
||||||
#
|
|
||||||
# Exit Codes:
|
|
||||||
# 0 - Success
|
|
||||||
# 1 - General error
|
|
||||||
# 2 - Backup file not found or invalid
|
|
||||||
# 3 - Service management failure
|
|
||||||
# 4 - Restoration failure
|
|
||||||
#
|
|
||||||
################################################################################
|
|
||||||
|
|
||||||
# Plex Backup Restoration Script
|
|
||||||
# Usage: ./restore-plex.sh [backup_date] [--dry-run]
|
|
||||||
|
|
||||||
set -e

# ANSI color codes used by the log helpers below
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Configuration: where this script lives, where archives are kept, and the
# Plex application-support root that files are restored into.
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
BACKUP_ROOT="/mnt/share/media/backups/plex"
PLEX_DATA_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server"

# Map: archived file name -> destination directory it is restored into
declare -A RESTORE_LOCATIONS=(
    ["com.plexapp.plugins.library.db"]="$PLEX_DATA_DIR/Plug-in Support/Databases/"
    ["com.plexapp.plugins.library.blobs.db"]="$PLEX_DATA_DIR/Plug-in Support/Databases/"
    ["Preferences.xml"]="$PLEX_DATA_DIR/"
)
|
|
||||||
|
|
||||||
# Emit a timestamped line; %b renders embedded ANSI color escapes
# (equivalent to the echo -e form this replaces).
log_message() {
    printf '%b\n' "$(date '+%H:%M:%S') $1"
}
|
|
||||||
|
|
||||||
# Log an error message in red.
log_error() { log_message "${RED}ERROR: $1${NC}"; }
|
|
||||||
|
|
||||||
# Log a success message in green.
log_success() { log_message "${GREEN}SUCCESS: $1${NC}"; }
|
|
||||||
|
|
||||||
# Log a warning message in yellow.
log_warning() { log_message "${YELLOW}WARNING: $1${NC}"; }
|
|
||||||
|
|
||||||
# List available backups
|
|
||||||
# List every plex-backup archive under BACKUP_ROOT, newest first, showing a
# human-readable date (when the filename carries a YYYYMMDD stamp) and size.
# Globals: BACKUP_ROOT (read)
list_backups() {
    log_message "Available backups:"
    # BUGFIX: read without -r mangles backslashes in names (SC2162)
    find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort -r | while IFS= read -r backup_file; do
        local backup_name backup_date
        backup_name=$(basename "$backup_file")
        # Pull the 8-digit date stamp out of plex-backup-YYYYMMDD_HHMMSS.tar.gz
        backup_date=$(echo "$backup_name" | sed 's/plex-backup-\([0-9]\{8\}\)_[0-9]\{6\}\.tar\.gz/\1/')
        if [[ "$backup_date" =~ ^[0-9]{8}$ ]]; then
            local readable_date file_size
            readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Unknown date")
            file_size=$(du -h "$backup_file" 2>/dev/null | cut -f1)
            echo "  $backup_name ($readable_date) - $file_size"
        else
            # Filename didn't match the expected pattern; show name and size only
            echo "  $backup_name - $(du -h "$backup_file" 2>/dev/null | cut -f1)"
        fi
    done
}
|
|
||||||
|
|
||||||
# Validate backup integrity
|
|
||||||
# Validate a backup archive: confirm the file exists, that gzip/tar can read
# it end-to-end, and log the contained file names.
# Arguments: $1 - path to the backup archive
# Returns:   0 if the archive is readable, 1 otherwise
validate_backup() {
    local backup_file="$1"

    if [ ! -f "$backup_file" ]; then
        log_error "Backup file not found: $backup_file"
        return 1
    fi

    log_message "Validating backup integrity for $(basename "$backup_file")..."

    # Test archive integrity (full decompression pass, output discarded)
    if tar -tzf "$backup_file" >/dev/null 2>&1; then
        log_success "Archive integrity check passed"

        # List contents to verify expected files are present.
        # BUGFIX: read without -r mangles backslashes in names (SC2162)
        log_message "Archive contents:"
        tar -tzf "$backup_file" | while IFS= read -r file; do
            log_success "  Found: $file"
        done
        return 0
    else
        log_error "Archive integrity check failed"
        return 1
    fi
}
|
|
||||||
|
|
||||||
# Create backup of current Plex data
|
|
||||||
# Snapshot the current Plex data files before a restore.
# Copies each RESTORE_LOCATIONS file into a timestamped directory next to
# this script.
# Outputs:   the snapshot directory path on STDOUT (and only that — the
#            caller captures stdout, so all log lines go to stderr)
# Returns:   0 on success, 1 if any copy fails
backup_current_data() {
    local backup_suffix current_backup_dir
    backup_suffix=$(date '+%Y%m%d_%H%M%S')
    current_backup_dir="$SCRIPT_DIR/plex_current_backup_$backup_suffix"

    # BUGFIX: logs used to go to stdout, so the caller's
    # $(backup_current_data) captured log lines mixed with the path.
    log_message "Creating backup of current Plex data..." >&2
    mkdir -p "$current_backup_dir"

    local file src
    for file in "${!RESTORE_LOCATIONS[@]}"; do
        src="${RESTORE_LOCATIONS[$file]}$file"
        if [ -f "$src" ]; then
            if sudo cp "$src" "$current_backup_dir/"; then
                log_success "Backed up current: $file" >&2
            else
                log_error "Failed to backup current: $file" >&2
                return 1
            fi
        fi
    done

    log_success "Current data backed up to: $current_backup_dir" >&2
    # The only stdout output: the snapshot path for the caller to capture
    echo "$current_backup_dir"
}
|
|
||||||
|
|
||||||
# Restore files from backup
|
|
||||||
# Extract a backup archive and copy each known file to its Plex location.
# Arguments: $1 - path to the backup archive
#            $2 - "true" for dry-run (log what would be restored, change nothing)
# Returns:   0 if everything restored cleanly, otherwise the count of
#            files that failed or were missing from the archive
restore_files() {
    local backup_file="$1"
    local dry_run="$2"

    if [ ! -f "$backup_file" ]; then
        log_error "Backup file not found: $backup_file"
        return 1
    fi

    # SECURITY: mktemp -d instead of the old predictable timestamped
    # /tmp/plex-restore-<ts> directory (symlink-attack safe)
    local temp_dir
    temp_dir=$(mktemp -d /tmp/plex-restore-XXXXXX) || {
        log_error "Failed to create temporary extraction directory"
        return 1
    }

    log_message "Extracting backup archive..."
    if ! tar -xzf "$backup_file" -C "$temp_dir"; then
        log_error "Failed to extract backup archive"
        rm -rf "$temp_dir"
        return 1
    fi

    log_message "Restoring files..."
    local restore_errors=0

    local file src_file dest_path dest_file
    for file in "${!RESTORE_LOCATIONS[@]}"; do
        src_file="$temp_dir/$file"
        dest_path="${RESTORE_LOCATIONS[$file]}"
        dest_file="$dest_path$file"

        if [ -f "$src_file" ]; then
            if [ "$dry_run" == "true" ]; then
                log_message "Would restore: $file to $dest_file"
            else
                log_message "Restoring: $file"
                if sudo cp "$src_file" "$dest_file"; then
                    # Plex must own its own data files after restore
                    sudo chown plex:plex "$dest_file"
                    log_success "Restored: $file"
                else
                    log_error "Failed to restore: $file"
                    restore_errors=$((restore_errors + 1))
                fi
            fi
        else
            log_warning "File not found in backup: $file"
            restore_errors=$((restore_errors + 1))
        fi
    done

    # Clean up temporary directory
    rm -rf "$temp_dir"

    return $restore_errors
}
|
|
||||||
|
|
||||||
# Manage Plex service
|
|
||||||
# Stop or start the Plex systemd unit, pausing briefly so the service
# has time to settle before the caller continues.
# Arguments: $1 - "stop" or "start"
manage_plex_service() {
    local verb="$1"
    log_message "$verb Plex Media Server..."

    case "$verb" in
        stop)
            sudo systemctl stop plexmediaserver.service
            sleep 3
            log_success "Plex stopped"
            ;;
        start)
            sudo systemctl start plexmediaserver.service
            sleep 3
            log_success "Plex started"
            ;;
    esac
}
|
|
||||||
|
|
||||||
# Main function
|
|
||||||
# Entry point: resolve the requested archive, validate it, confirm with the
# user, snapshot current data, restore, and restart Plex.
# Arguments: $1 - backup archive (bare name or absolute path), or --dry-run
#            $2 - optional --dry-run
# Exits:     0 on success/cancel/listing, 1 on any failure
main() {
    local backup_file="$1"
    local dry_run=false

    # Check for dry-run flag (accepted in either position)
    if [ "$2" = "--dry-run" ] || [ "$1" = "--dry-run" ]; then
        dry_run=true
    fi

    # If no backup file provided, list available backups and show usage
    if [ -z "$backup_file" ] || [ "$backup_file" = "--dry-run" ]; then
        list_backups
        echo
        echo "Usage: $0 <backup_file> [--dry-run]"
        echo "Example: $0 plex-backup-20250125_143022.tar.gz"
        echo "         $0 /mnt/share/media/backups/plex/plex-backup-20250125_143022.tar.gz"
        exit 0
    fi

    # If relative path, prepend BACKUP_ROOT
    if [[ "$backup_file" != /* ]]; then
        backup_file="$BACKUP_ROOT/$backup_file"
    fi

    # Validate backup exists and is complete
    if ! validate_backup "$backup_file"; then
        log_error "Backup validation failed"
        exit 1
    fi

    if [ "$dry_run" = "true" ]; then
        restore_files "$backup_file" true
        log_message "Dry run completed. No changes were made."
        exit 0
    fi

    # Confirm restoration
    echo
    log_warning "This will restore Plex data from backup $(basename "$backup_file")"
    log_warning "Current Plex data will be backed up before restoration"
    read -p "Continue? (y/N): " -n 1 -r
    echo

    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        log_message "Restoration cancelled"
        exit 0
    fi

    # Stop Plex service
    manage_plex_service stop

    # BUGFIX: the old code ran `local current_backup=$(backup_current_data)`
    # and then tested $?, but $? reflected the `local` builtin (always 0),
    # so a failed pre-restore snapshot was never detected.
    local current_backup
    if ! current_backup=$(backup_current_data); then
        log_error "Failed to backup current data"
        manage_plex_service start
        exit 1
    fi

    # Restore files
    if restore_files "$backup_file" false; then
        log_success "Restoration completed successfully"
        log_message "Current data backup saved at: $current_backup"
    else
        log_error "Restoration failed"
        manage_plex_service start
        exit 1
    fi

    # Start Plex service
    manage_plex_service start

    log_success "Plex restoration completed. Please verify your server is working correctly."
}
|
|
||||||
|
|
||||||
# Safety net: restart Plex on every exit path, even after errors.
trap 'manage_plex_service start' EXIT

# Run the restore workflow with the caller's arguments.
main "$@"
|
|
||||||
@@ -1,715 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
# Plex Backup System Comprehensive Test Suite
|
|
||||||
################################################################################
|
|
||||||
#
|
|
||||||
# Author: Peter Wood <peter@peterwood.dev>
|
|
||||||
# Description: Automated testing framework for the complete Plex backup
|
|
||||||
# ecosystem, providing unit tests, integration tests, and
|
|
||||||
# end-to-end validation of all backup operations.
|
|
||||||
#
|
|
||||||
# Features:
|
|
||||||
# - Unit testing for individual backup components
|
|
||||||
# - Integration testing for full backup workflows
|
|
||||||
# - Database integrity test scenarios
|
|
||||||
# - Service management testing
|
|
||||||
# - Performance benchmarking
|
|
||||||
# - Error condition simulation and recovery testing
|
|
||||||
# - Test result reporting and analysis
|
|
||||||
#
|
|
||||||
# Related Scripts:
|
|
||||||
# - backup-plex.sh: Primary script under test
|
|
||||||
# - restore-plex.sh: Restoration testing component
|
|
||||||
# - validate-plex-backups.sh: Validation testing
|
|
||||||
# - monitor-plex-backup.sh: Monitoring system testing
|
|
||||||
# - plex.sh: Service management testing
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# ./test-plex-backup.sh # Run full test suite
|
|
||||||
# ./test-plex-backup.sh --unit # Unit tests only
|
|
||||||
# ./test-plex-backup.sh --integration # Integration tests only
|
|
||||||
# ./test-plex-backup.sh --quick # Quick smoke tests
|
|
||||||
# ./test-plex-backup.sh --cleanup # Clean up test artifacts
|
|
||||||
#
|
|
||||||
# Dependencies:
|
|
||||||
# - All Plex backup scripts in this directory
|
|
||||||
# - sqlite3 or Plex SQLite binary
|
|
||||||
# - jq (for JSON processing)
|
|
||||||
# - tar (for archive operations)
|
|
||||||
# - systemctl (for service testing)
|
|
||||||
#
|
|
||||||
# Exit Codes:
|
|
||||||
# 0 - All tests passed
|
|
||||||
# 1 - General error
|
|
||||||
# 2 - Test failures detected
|
|
||||||
# 3 - Missing dependencies
|
|
||||||
# 4 - Test setup failure
|
|
||||||
#
|
|
||||||
################################################################################
|
|
||||||
|
|
||||||
# Comprehensive Plex Backup System Test Suite
|
|
||||||
# This script provides automated testing for all backup-related functionality
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# Color codes for output
|
|
||||||
RED='\033[0;31m'
|
|
||||||
GREEN='\033[0;32m'
|
|
||||||
YELLOW='\033[1;33m'
|
|
||||||
BLUE='\033[0;34m'
|
|
||||||
CYAN='\033[0;36m'
|
|
||||||
NC='\033[0m' # No Color
|
|
||||||
|
|
||||||
# Test configuration
|
|
||||||
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
|
|
||||||
TEST_DIR="/tmp/plex-backup-test-$(date +%s)"
|
|
||||||
TEST_BACKUP_ROOT="$TEST_DIR/backups"
|
|
||||||
TEST_LOG_ROOT="$TEST_DIR/logs"
|
|
||||||
TEST_RESULTS_FILE="$TEST_DIR/test-results.json"
|
|
||||||
|
|
||||||
# Test counters
|
|
||||||
TESTS_RUN=0
|
|
||||||
TESTS_PASSED=0
|
|
||||||
TESTS_FAILED=0
|
|
||||||
declare -a FAILED_TESTS=()
|
|
||||||
|
|
||||||
# Logging functions
|
|
||||||
log_test() {
|
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
|
||||||
echo -e "${CYAN}[TEST ${timestamp}]${NC} $1"
|
|
||||||
}
|
|
||||||
|
|
||||||
log_pass() {
|
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
|
||||||
echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
|
|
||||||
TESTS_PASSED=$((TESTS_PASSED + 1))
|
|
||||||
}
|
|
||||||
|
|
||||||
log_fail() {
|
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
|
||||||
echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
|
|
||||||
TESTS_FAILED=$((TESTS_FAILED + 1))
|
|
||||||
FAILED_TESTS+=("$1")
|
|
||||||
}
|
|
||||||
|
|
||||||
log_info() {
|
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
|
||||||
echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
|
|
||||||
}
|
|
||||||
|
|
||||||
log_warn() {
|
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
|
||||||
echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test framework functions
|
|
||||||
run_test() {
|
|
||||||
local test_name="$1"
|
|
||||||
local test_function="$2"
|
|
||||||
|
|
||||||
TESTS_RUN=$((TESTS_RUN + 1))
|
|
||||||
log_test "Running: $test_name"
|
|
||||||
|
|
||||||
if $test_function; then
|
|
||||||
log_pass "$test_name"
|
|
||||||
record_test_result "$test_name" "PASS" ""
|
|
||||||
else
|
|
||||||
log_fail "$test_name"
|
|
||||||
record_test_result "$test_name" "FAIL" "Test function returned non-zero exit code"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
record_test_result() {
|
|
||||||
local test_name="$1"
|
|
||||||
local status="$2"
|
|
||||||
local error_message="$3"
|
|
||||||
local timestamp=$(date -Iseconds)
|
|
||||||
|
|
||||||
# Initialize results file if it doesn't exist
|
|
||||||
if [ ! -f "$TEST_RESULTS_FILE" ]; then
|
|
||||||
echo "[]" > "$TEST_RESULTS_FILE"
|
|
||||||
fi
|
|
||||||
|
|
||||||
local result=$(jq -n \
|
|
||||||
--arg test_name "$test_name" \
|
|
||||||
--arg status "$status" \
|
|
||||||
--arg error_message "$error_message" \
|
|
||||||
--arg timestamp "$timestamp" \
|
|
||||||
'{
|
|
||||||
test_name: $test_name,
|
|
||||||
status: $status,
|
|
||||||
error_message: $error_message,
|
|
||||||
timestamp: $timestamp
|
|
||||||
}')
|
|
||||||
|
|
||||||
jq --argjson result "$result" '. += [$result]' "$TEST_RESULTS_FILE" > "${TEST_RESULTS_FILE}.tmp" && \
|
|
||||||
mv "${TEST_RESULTS_FILE}.tmp" "$TEST_RESULTS_FILE"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Setup test environment
|
|
||||||
setup_test_environment() {
|
|
||||||
log_info "Setting up test environment in $TEST_DIR"
|
|
||||||
|
|
||||||
# Create test directories
|
|
||||||
mkdir -p "$TEST_DIR"
|
|
||||||
mkdir -p "$TEST_BACKUP_ROOT"
|
|
||||||
mkdir -p "$TEST_LOG_ROOT"
|
|
||||||
mkdir -p "$TEST_DIR/mock_plex"
|
|
||||||
|
|
||||||
# Create mock Plex files for testing
|
|
||||||
echo "PRAGMA user_version=1;" > "$TEST_DIR/mock_plex/com.plexapp.plugins.library.db"
|
|
||||||
echo "PRAGMA user_version=1;" > "$TEST_DIR/mock_plex/com.plexapp.plugins.library.blobs.db"
|
|
||||||
dd if=/dev/zero of="$TEST_DIR/mock_plex/Preferences.xml" bs=1024 count=1 2>/dev/null
|
|
||||||
|
|
||||||
# Create mock performance log
|
|
||||||
echo "[]" > "$TEST_DIR/mock-performance.json"
|
|
||||||
echo "{}" > "$TEST_DIR/mock-backup.json"
|
|
||||||
|
|
||||||
log_info "Test environment setup complete"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Cleanup test environment
|
|
||||||
cleanup_test_environment() {
|
|
||||||
if [ -d "$TEST_DIR" ]; then
|
|
||||||
log_info "Cleaning up test environment"
|
|
||||||
rm -rf "$TEST_DIR"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Mock functions to replace actual backup script functions
|
|
||||||
mock_manage_plex_service() {
|
|
||||||
local action="$1"
|
|
||||||
echo "Mock: Plex service $action"
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
mock_calculate_checksum() {
|
|
||||||
local file="$1"
|
|
||||||
echo "$(echo "$file" | md5sum | cut -d' ' -f1)"
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
mock_verify_backup() {
|
|
||||||
local src="$1"
|
|
||||||
local dest="$2"
|
|
||||||
# Always return success for testing
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test: JSON log initialization
|
|
||||||
test_json_log_initialization() {
|
|
||||||
local test_log="$TEST_DIR/test-init.json"
|
|
||||||
|
|
||||||
# Remove file if it exists
|
|
||||||
rm -f "$test_log"
|
|
||||||
|
|
||||||
# Test initialization
|
|
||||||
if [ ! -f "$test_log" ] || ! jq empty "$test_log" 2>/dev/null; then
|
|
||||||
echo "{}" > "$test_log"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Verify file exists and is valid JSON
|
|
||||||
if [ -f "$test_log" ] && jq empty "$test_log" 2>/dev/null; then
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test: Performance tracking
|
|
||||||
test_performance_tracking() {
|
|
||||||
local test_perf_log="$TEST_DIR/test-performance.json"
|
|
||||||
echo "[]" > "$test_perf_log"
|
|
||||||
|
|
||||||
# Mock performance tracking function
|
|
||||||
track_performance_test() {
|
|
||||||
local operation="$1"
|
|
||||||
local start_time="$2"
|
|
||||||
local end_time=$(date +%s)
|
|
||||||
local duration=$((end_time - start_time))
|
|
||||||
|
|
||||||
local entry=$(jq -n \
|
|
||||||
--arg operation "$operation" \
|
|
||||||
--arg duration "$duration" \
|
|
||||||
--arg timestamp "$(date -Iseconds)" \
|
|
||||||
'{
|
|
||||||
operation: $operation,
|
|
||||||
duration_seconds: ($duration | tonumber),
|
|
||||||
timestamp: $timestamp
|
|
||||||
}')
|
|
||||||
|
|
||||||
jq --argjson entry "$entry" '. += [$entry]' "$test_perf_log" > "${test_perf_log}.tmp" && \
|
|
||||||
mv "${test_perf_log}.tmp" "$test_perf_log"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test tracking
|
|
||||||
local start_time=$(date +%s)
|
|
||||||
sleep 1 # Simulate work
|
|
||||||
track_performance_test "test_operation" "$start_time"
|
|
||||||
|
|
||||||
# Verify entry was added
|
|
||||||
local entry_count=$(jq length "$test_perf_log")
|
|
||||||
if [ "$entry_count" -eq 1 ]; then
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test: Notification system
|
|
||||||
test_notification_system() {
|
|
||||||
# Mock notification function
|
|
||||||
send_notification_test() {
|
|
||||||
local title="$1"
|
|
||||||
local message="$2"
|
|
||||||
local status="${3:-info}"
|
|
||||||
|
|
||||||
# Just verify parameters are received correctly
|
|
||||||
if [ -n "$title" ] && [ -n "$message" ]; then
|
|
||||||
echo "Notification: $title - $message ($status)" > "$TEST_DIR/notification.log"
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test notification
|
|
||||||
send_notification_test "Test Title" "Test Message" "success"
|
|
||||||
|
|
||||||
# Verify notification was processed
|
|
||||||
if [ -f "$TEST_DIR/notification.log" ] && grep -q "Test Title" "$TEST_DIR/notification.log"; then
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test: Checksum caching
|
|
||||||
test_checksum_caching() {
|
|
||||||
local test_file="$TEST_DIR/checksum_test.txt"
|
|
||||||
local cache_file="${test_file}.md5"
|
|
||||||
|
|
||||||
# Create test file
|
|
||||||
echo "test content" > "$test_file"
|
|
||||||
|
|
||||||
# Mock checksum function with caching
|
|
||||||
calculate_checksum_test() {
|
|
||||||
local file="$1"
|
|
||||||
local cache_file="${file}.md5"
|
|
||||||
local file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
|
|
||||||
|
|
||||||
# Check cache
|
|
||||||
if [ -f "$cache_file" ]; then
|
|
||||||
local cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
|
|
||||||
if [ "$cache_mtime" -gt "$file_mtime" ]; then
|
|
||||||
cat "$cache_file"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Calculate and cache
|
|
||||||
local checksum=$(md5sum "$file" | cut -d' ' -f1)
|
|
||||||
echo "$checksum" > "$cache_file"
|
|
||||||
echo "$checksum"
|
|
||||||
}
|
|
||||||
|
|
||||||
# First calculation (should create cache)
|
|
||||||
local checksum1=$(calculate_checksum_test "$test_file")
|
|
||||||
|
|
||||||
# Second calculation (should use cache)
|
|
||||||
local checksum2=$(calculate_checksum_test "$test_file")
|
|
||||||
|
|
||||||
# Verify checksums match and cache file exists
|
|
||||||
if [ "$checksum1" = "$checksum2" ] && [ -f "$cache_file" ]; then
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test: Backup verification
|
|
||||||
test_backup_verification() {
|
|
||||||
local src_file="$TEST_DIR/source.txt"
|
|
||||||
local dest_file="$TEST_DIR/backup.txt"
|
|
||||||
|
|
||||||
# Create identical files
|
|
||||||
echo "backup test content" > "$src_file"
|
|
||||||
cp "$src_file" "$dest_file"
|
|
||||||
|
|
||||||
# Mock verification function
|
|
||||||
verify_backup_test() {
|
|
||||||
local src="$1"
|
|
||||||
local dest="$2"
|
|
||||||
|
|
||||||
local src_checksum=$(md5sum "$src" | cut -d' ' -f1)
|
|
||||||
local dest_checksum=$(md5sum "$dest" | cut -d' ' -f1)
|
|
||||||
|
|
||||||
if [ "$src_checksum" = "$dest_checksum" ]; then
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test verification
|
|
||||||
if verify_backup_test "$src_file" "$dest_file"; then
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test: Parallel processing framework
|
|
||||||
test_parallel_processing() {
|
|
||||||
local temp_dir=$(mktemp -d)
|
|
||||||
local -a pids=()
|
|
||||||
local total_jobs=5
|
|
||||||
local completed_jobs=0
|
|
||||||
|
|
||||||
# Simulate parallel jobs
|
|
||||||
for i in $(seq 1 $total_jobs); do
|
|
||||||
(
|
|
||||||
# Simulate work
|
|
||||||
sleep 0.$i
|
|
||||||
echo "$i" > "$temp_dir/job_$i.result"
|
|
||||||
) &
|
|
||||||
pids+=($!)
|
|
||||||
done
|
|
||||||
|
|
||||||
# Wait for all jobs
|
|
||||||
for pid in "${pids[@]}"; do
|
|
||||||
if wait "$pid"; then
|
|
||||||
completed_jobs=$((completed_jobs + 1))
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# Verify all jobs completed
|
|
||||||
local result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
|
|
||||||
|
|
||||||
# Cleanup
|
|
||||||
rm -rf "$temp_dir"
|
|
||||||
|
|
||||||
if [ "$completed_jobs" -eq "$total_jobs" ] && [ "$result_files" -eq "$total_jobs" ]; then
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test: Database integrity check simulation
|
|
||||||
test_database_integrity() {
|
|
||||||
local test_db="$TEST_DIR/test.db"
|
|
||||||
|
|
||||||
# Create a simple SQLite database
|
|
||||||
sqlite3 "$test_db" "CREATE TABLE test (id INTEGER, name TEXT);"
|
|
||||||
sqlite3 "$test_db" "INSERT INTO test VALUES (1, 'test');"
|
|
||||||
|
|
||||||
# Mock integrity check
|
|
||||||
check_integrity_test() {
|
|
||||||
local db_file="$1"
|
|
||||||
|
|
||||||
# Use sqlite3 instead of Plex SQLite for testing
|
|
||||||
local result=$(sqlite3 "$db_file" "PRAGMA integrity_check;" 2>/dev/null)
|
|
||||||
|
|
||||||
if echo "$result" | grep -q "ok"; then
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test integrity check
|
|
||||||
if check_integrity_test "$test_db"; then
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test: Configuration parsing
|
|
||||||
test_configuration_parsing() {
|
|
||||||
# Mock command line parsing
|
|
||||||
parse_args_test() {
|
|
||||||
local args=("$@")
|
|
||||||
local auto_repair=false
|
|
||||||
local parallel=true
|
|
||||||
local webhook=""
|
|
||||||
|
|
||||||
for arg in "${args[@]}"; do
|
|
||||||
case "$arg" in
|
|
||||||
--auto-repair) auto_repair=true ;;
|
|
||||||
--no-parallel) parallel=false ;;
|
|
||||||
--webhook=*) webhook="${arg#*=}" ;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
# Return parsed values
|
|
||||||
echo "$auto_repair $parallel $webhook"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test parsing
|
|
||||||
local result=$(parse_args_test --auto-repair --webhook=http://example.com)
|
|
||||||
|
|
||||||
if echo "$result" | grep -q "true true http://example.com"; then
|
|
||||||
return 0
|
|
||||||
else
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test: Error handling
|
|
||||||
test_error_handling() {
|
|
||||||
# Mock function that can fail
|
|
||||||
test_function_with_error() {
|
|
||||||
local should_fail="$1"
|
|
||||||
|
|
||||||
if [ "$should_fail" = "true" ]; then
|
|
||||||
return 1
|
|
||||||
else
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test success case
|
|
||||||
if test_function_with_error "false"; then
|
|
||||||
# Test failure case
|
|
||||||
if ! test_function_with_error "true"; then
|
|
||||||
return 0 # Both cases worked as expected
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
# Run all unit tests
|
|
||||||
run_all_tests() {
|
|
||||||
log_info "Setting up test environment"
|
|
||||||
setup_test_environment
|
|
||||||
|
|
||||||
log_info "Starting unit tests"
|
|
||||||
|
|
||||||
# Core functionality tests
|
|
||||||
run_test "JSON Log Initialization" test_json_log_initialization
|
|
||||||
run_test "Performance Tracking" test_performance_tracking
|
|
||||||
run_test "Notification System" test_notification_system
|
|
||||||
run_test "Checksum Caching" test_checksum_caching
|
|
||||||
run_test "Backup Verification" test_backup_verification
|
|
||||||
run_test "Parallel Processing" test_parallel_processing
|
|
||||||
run_test "Database Integrity Check" test_database_integrity
|
|
||||||
run_test "Configuration Parsing" test_configuration_parsing
|
|
||||||
run_test "Error Handling" test_error_handling
|
|
||||||
|
|
||||||
log_info "Unit tests completed"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Run integration tests (requires actual Plex environment)
|
|
||||||
run_integration_tests() {
|
|
||||||
log_info "Starting integration tests"
|
|
||||||
log_warn "Integration tests require a working Plex installation"
|
|
||||||
|
|
||||||
# Check if Plex service exists
|
|
||||||
if ! systemctl list-units --all | grep -q plexmediaserver; then
|
|
||||||
log_warn "Plex service not found - skipping integration tests"
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Test actual service management (if safe to do so)
|
|
||||||
log_info "Integration tests would test actual Plex service management"
|
|
||||||
log_info "Skipping for safety - implement with caution"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Run performance tests
|
|
||||||
run_performance_tests() {
|
|
||||||
log_info "Starting performance benchmarks"
|
|
||||||
|
|
||||||
local start_time=$(date +%s)
|
|
||||||
|
|
||||||
# Test file operations
|
|
||||||
local test_file="$TEST_DIR/perf_test.dat"
|
|
||||||
dd if=/dev/zero of="$test_file" bs=1M count=10 2>/dev/null
|
|
||||||
|
|
||||||
# Benchmark checksum calculation
|
|
||||||
local checksum_start=$(date +%s)
|
|
||||||
md5sum "$test_file" > /dev/null
|
|
||||||
local checksum_time=$(($(date +%s) - checksum_start))
|
|
||||||
|
|
||||||
# Benchmark compression
|
|
||||||
local compress_start=$(date +%s)
|
|
||||||
tar -czf "$TEST_DIR/perf_test.tar.gz" -C "$TEST_DIR" "perf_test.dat"
|
|
||||||
local compress_time=$(($(date +%s) - compress_start))
|
|
||||||
|
|
||||||
local total_time=$(($(date +%s) - start_time))
|
|
||||||
|
|
||||||
log_info "Performance Results:"
|
|
||||||
log_info " Checksum (10MB): ${checksum_time}s"
|
|
||||||
log_info " Compression (10MB): ${compress_time}s"
|
|
||||||
log_info " Total benchmark time: ${total_time}s"
|
|
||||||
|
|
||||||
# Record performance data
|
|
||||||
local perf_entry=$(jq -n \
|
|
||||||
--arg checksum_time "$checksum_time" \
|
|
||||||
--arg compress_time "$compress_time" \
|
|
||||||
--arg total_time "$total_time" \
|
|
||||||
--arg timestamp "$(date -Iseconds)" \
|
|
||||||
'{
|
|
||||||
benchmark: "performance_test",
|
|
||||||
checksum_time_seconds: ($checksum_time | tonumber),
|
|
||||||
compress_time_seconds: ($compress_time | tonumber),
|
|
||||||
total_time_seconds: ($total_time | tonumber),
|
|
||||||
timestamp: $timestamp
|
|
||||||
}')
|
|
||||||
|
|
||||||
echo "$perf_entry" > "$TEST_DIR/performance_results.json"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Generate comprehensive test report
|
|
||||||
generate_test_report() {
|
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
|
||||||
|
|
||||||
echo
|
|
||||||
echo "=============================================="
|
|
||||||
echo " PLEX BACKUP TEST REPORT"
|
|
||||||
echo "=============================================="
|
|
||||||
echo "Test Run: $timestamp"
|
|
||||||
echo "Tests Run: $TESTS_RUN"
|
|
||||||
echo "Tests Passed: $TESTS_PASSED"
|
|
||||||
echo "Tests Failed: $TESTS_FAILED"
|
|
||||||
echo
|
|
||||||
|
|
||||||
if [ $TESTS_FAILED -gt 0 ]; then
|
|
||||||
echo "FAILED TESTS:"
|
|
||||||
for failed_test in "${FAILED_TESTS[@]}"; do
|
|
||||||
echo " - $failed_test"
|
|
||||||
done
|
|
||||||
echo
|
|
||||||
fi
|
|
||||||
|
|
||||||
local success_rate=0
|
|
||||||
if [ $TESTS_RUN -gt 0 ]; then
|
|
||||||
success_rate=$(( (TESTS_PASSED * 100) / TESTS_RUN ))
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Success Rate: ${success_rate}%"
|
|
||||||
echo
|
|
||||||
|
|
||||||
if [ $TESTS_FAILED -eq 0 ]; then
|
|
||||||
log_pass "All tests passed successfully!"
|
|
||||||
else
|
|
||||||
log_fail "Some tests failed - review output above"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Save detailed results
|
|
||||||
if [ -f "$TEST_RESULTS_FILE" ]; then
|
|
||||||
local report_file="$TEST_DIR/test_report_$(date +%Y%m%d_%H%M%S).json"
|
|
||||||
jq -n \
|
|
||||||
--arg timestamp "$timestamp" \
|
|
||||||
--arg tests_run "$TESTS_RUN" \
|
|
||||||
--arg tests_passed "$TESTS_PASSED" \
|
|
||||||
--arg tests_failed "$TESTS_FAILED" \
|
|
||||||
--arg success_rate "$success_rate" \
|
|
||||||
--argjson failed_tests "$(printf '%s\n' "${FAILED_TESTS[@]}" | jq -R . | jq -s .)" \
|
|
||||||
--argjson test_details "$(cat "$TEST_RESULTS_FILE")" \
|
|
||||||
'{
|
|
||||||
test_run_timestamp: $timestamp,
|
|
||||||
summary: {
|
|
||||||
tests_run: ($tests_run | tonumber),
|
|
||||||
tests_passed: ($tests_passed | tonumber),
|
|
||||||
tests_failed: ($tests_failed | tonumber),
|
|
||||||
success_rate_percent: ($success_rate | tonumber)
|
|
||||||
},
|
|
||||||
failed_tests: $failed_tests,
|
|
||||||
detailed_results: $test_details
|
|
||||||
}' > "$report_file"
|
|
||||||
|
|
||||||
log_info "Detailed test report saved to: $report_file"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Integration tests (if requested)
|
|
||||||
run_integration_tests() {
|
|
||||||
log_info "Running integration tests..."
|
|
||||||
|
|
||||||
# Note: These would require actual Plex installation
|
|
||||||
# For now, we'll just indicate what would be tested
|
|
||||||
|
|
||||||
log_warn "Integration tests require running Plex Media Server"
|
|
||||||
log_warn "These tests would cover:"
|
|
||||||
log_warn " - Service stop/start functionality"
|
|
||||||
log_warn " - Database integrity checks"
|
|
||||||
log_warn " - Full backup and restore cycles"
|
|
||||||
log_warn " - Performance under load"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Performance benchmarks
|
|
||||||
run_performance_tests() {
|
|
||||||
log_info "Running performance benchmarks..."
|
|
||||||
|
|
||||||
local start_time=$(date +%s)
|
|
||||||
|
|
||||||
# Create large test files
|
|
||||||
local large_file="$TEST_DIR/large_test.db"
|
|
||||||
dd if=/dev/zero of="$large_file" bs=1M count=100 2>/dev/null
|
|
||||||
|
|
||||||
# Benchmark checksum calculation
|
|
||||||
local checksum_start=$(date +%s)
|
|
||||||
md5sum "$large_file" > /dev/null
|
|
||||||
local checksum_end=$(date +%s)
|
|
||||||
local checksum_time=$((checksum_end - checksum_start))
|
|
||||||
|
|
||||||
# Benchmark compression
|
|
||||||
local compress_start=$(date +%s)
|
|
||||||
tar -czf "$TEST_DIR/large_test.tar.gz" -C "$TEST_DIR" "large_test.db"
|
|
||||||
local compress_end=$(date +%s)
|
|
||||||
local compress_time=$((compress_end - compress_start))
|
|
||||||
|
|
||||||
local total_time=$(($(date +%s) - start_time))
|
|
||||||
|
|
||||||
log_info "Performance Results:"
|
|
||||||
log_info " Checksum (100MB): ${checksum_time}s"
|
|
||||||
log_info " Compression (100MB): ${compress_time}s"
|
|
||||||
log_info " Total benchmark time: ${total_time}s"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Main execution
|
|
||||||
main() {
|
|
||||||
case "${1:-all}" in
|
|
||||||
"unit")
|
|
||||||
run_all_tests
|
|
||||||
;;
|
|
||||||
"integration")
|
|
||||||
run_integration_tests
|
|
||||||
;;
|
|
||||||
"performance")
|
|
||||||
run_performance_tests
|
|
||||||
;;
|
|
||||||
"all")
|
|
||||||
run_all_tests
|
|
||||||
# Uncomment for integration tests if environment supports it
|
|
||||||
# run_integration_tests
|
|
||||||
run_performance_tests
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "Usage: $0 [unit|integration|performance|all]"
|
|
||||||
echo " unit - Run unit tests only"
|
|
||||||
echo " integration - Run integration tests (requires Plex)"
|
|
||||||
echo " performance - Run performance benchmarks"
|
|
||||||
echo " all - Run all available tests"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
generate_test_report
|
|
||||||
|
|
||||||
# Exit with appropriate code
|
|
||||||
if [ $TESTS_FAILED -gt 0 ]; then
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Trap to ensure cleanup on exit
|
|
||||||
trap cleanup_test_environment EXIT
|
|
||||||
|
|
||||||
main "$@"
|
|
||||||
@@ -1,272 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
# Plex Recovery Validation Script
|
|
||||||
################################################################################
|
|
||||||
#
|
|
||||||
# Author: Peter Wood <peter@peterwood.dev>
|
|
||||||
# Description: Comprehensive validation script that verifies the success of
|
|
||||||
# Plex database recovery operations. Performs extensive checks
|
|
||||||
# on database integrity, service functionality, and system health
|
|
||||||
# to ensure complete recovery and operational readiness.
|
|
||||||
#
|
|
||||||
# Features:
|
|
||||||
# - Database integrity verification
|
|
||||||
# - Service functionality testing
|
|
||||||
# - Library accessibility checks
|
|
||||||
# - Performance validation
|
|
||||||
# - Web interface connectivity testing
|
|
||||||
# - Comprehensive recovery reporting
|
|
||||||
# - Post-recovery optimization suggestions
|
|
||||||
#
|
|
||||||
# Related Scripts:
|
|
||||||
# - recover-plex-database.sh: Primary recovery script validated by this tool
|
|
||||||
# - icu-aware-recovery.sh: ICU recovery validation
|
|
||||||
# - nuclear-plex-recovery.sh: Nuclear recovery validation
|
|
||||||
# - backup-plex.sh: Backup system that enables recovery
|
|
||||||
# - validate-plex-backups.sh: Backup validation tools
|
|
||||||
# - plex.sh: General Plex service management
|
|
||||||
#
|
|
||||||
# Usage:
|
|
||||||
# ./validate-plex-recovery.sh # Full validation suite
|
|
||||||
# ./validate-plex-recovery.sh --quick # Quick validation checks
|
|
||||||
# ./validate-plex-recovery.sh --detailed # Detailed analysis and reporting
|
|
||||||
# ./validate-plex-recovery.sh --performance # Performance validation only
|
|
||||||
#
|
|
||||||
# Dependencies:
|
|
||||||
# - sqlite3 or Plex SQLite binary
|
|
||||||
# - curl (for web interface testing)
|
|
||||||
# - systemctl (for service status checks)
|
|
||||||
# - Plex Media Server
|
|
||||||
#
|
|
||||||
# Exit Codes:
|
|
||||||
# 0 - Recovery validation successful
|
|
||||||
# 1 - General error
|
|
||||||
# 2 - Database validation failures
|
|
||||||
# 3 - Service functionality issues
|
|
||||||
# 4 - Performance concerns detected
|
|
||||||
# 5 - Partial recovery (requires attention)
|
|
||||||
#
|
|
||||||
################################################################################
|
|
||||||
|
|
||||||
# Final Plex Recovery Validation Script
|
|
||||||
# Comprehensive check to ensure Plex is fully recovered and functional
|
|
||||||
|
|
||||||
# Colors for output.  The escape sequences are stored literally (single
# quotes) and expanded at print time; marked readonly since they are
# constants for the lifetime of the script.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Location of the Plex SQLite databases validated by this script.
readonly PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
|
|
||||||
|
|
||||||
print_status() {
    # Echo a message wrapped in an ANSI color code and reset afterwards.
    # $1 - color escape sequence, $2 - text to display
    local tint="$1"
    local text="$2"
    # %b interprets backslash escapes exactly like `echo -e` does.
    printf '%b\n' "${tint}${text}${NC}"
}
|
|
||||||
|
|
||||||
print_header() {
    # Render a section title ($1) framed by blue separator rules.
    local rule="================================"
    echo
    print_status "$BLUE" "$rule"
    print_status "$BLUE" "$1"
    print_status "$BLUE" "$rule"
}
|
|
||||||
|
|
||||||
# Check service status
#
# Verifies that plexmediaserver is active under systemd and, when running,
# reports its start timestamp and current memory footprint.
# Returns: 0 when the service is running, 1 otherwise.
check_service_status() {
    print_header "SERVICE STATUS CHECK"

    if systemctl is-active --quiet plexmediaserver; then
        print_status "$GREEN" "✓ Plex Media Server is running"

        # Get service uptime.  Declaration split from assignment so a
        # systemctl failure is not masked by `local` (SC2155).
        local uptime
        uptime=$(systemctl show plexmediaserver --property=ActiveEnterTimestamp --value)
        print_status "$GREEN" "  Started: $uptime"

        # Get memory usage; MemoryCurrent reports "[not set]" when cgroup
        # accounting is unavailable, so guard before doing arithmetic.
        local memory
        memory=$(systemctl show plexmediaserver --property=MemoryCurrent --value)
        if [[ -n "$memory" && "$memory" != "[not set]" ]]; then
            local memory_mb=$((memory / 1024 / 1024))
            print_status "$GREEN" "  Memory usage: ${memory_mb}MB"
        fi

        return 0
    else
        print_status "$RED" "✗ Plex Media Server is not running"
        return 1
    fi
}
|
|
||||||
|
|
||||||
# Check database integrity
#
# Confirms both Plex SQLite databases exist, are queryable, are not
# suspiciously small (a previous corruption produced a 0-byte blobs DB),
# and are owned by plex:plex.  Ownership problems only warn; missing or
# unqueryable databases fail the check.
# Returns: 0 when all critical checks pass, 1 otherwise.
check_database_integrity() {
    print_header "DATABASE INTEGRITY CHECK"

    local main_db="${PLEX_DB_DIR}/com.plexapp.plugins.library.db"
    local blobs_db="${PLEX_DB_DIR}/com.plexapp.plugins.library.blobs.db"
    local all_good=true

    # Check main database
    if [[ -f "$main_db" ]]; then
        # Declarations split from assignments so command failures are not
        # masked by `local` (SC2155).
        local main_size
        main_size=$(du -h "$main_db" | cut -f1)
        print_status "$GREEN" "✓ Main database exists (${main_size})"

        # Try basic database operations
        if sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" >/dev/null 2>&1; then
            local table_count
            table_count=$(sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null)
            print_status "$GREEN" "  Contains $table_count tables"
        else
            print_status "$YELLOW" "  Warning: Cannot query database tables"
            all_good=false
        fi
    else
        print_status "$RED" "✗ Main database missing"
        all_good=false
    fi

    # Check blobs database
    if [[ -f "$blobs_db" ]]; then
        local blobs_size
        blobs_size=$(du -h "$blobs_db" | cut -f1)
        print_status "$GREEN" "✓ Blobs database exists (${blobs_size})"

        # Check if it's not empty (previous corruption was 0 bytes).
        # GNU stat first, BSD stat as fallback.
        local blobs_bytes
        blobs_bytes=$(stat -c%s "$blobs_db" 2>/dev/null || stat -f%z "$blobs_db" 2>/dev/null)
        if [[ "$blobs_bytes" -gt 1000000 ]]; then
            # $blobs_bytes quoted so numfmt never sees a split/empty arg (SC2086)
            print_status "$GREEN" "  File size is healthy ($(numfmt --to=iec "$blobs_bytes"))"
        else
            print_status "$RED" "  Warning: File size is too small ($blobs_bytes bytes)"
            all_good=false
        fi
    else
        print_status "$RED" "✗ Blobs database missing"
        all_good=false
    fi

    # Check file ownership
    local main_owner blobs_owner
    main_owner=$(stat -c%U:%G "$main_db" 2>/dev/null)
    blobs_owner=$(stat -c%U:%G "$blobs_db" 2>/dev/null)

    if [[ "$main_owner" == "plex:plex" && "$blobs_owner" == "plex:plex" ]]; then
        print_status "$GREEN" "✓ Database ownership is correct (plex:plex)"
    else
        print_status "$YELLOW" "  Warning: Ownership issues detected"
        print_status "$YELLOW" "  Main DB: $main_owner, Blobs DB: $blobs_owner"
    fi

    # Plain if/return instead of the `return $(… && echo 0 || echo 1)`
    # anti-pattern, which forked a subshell just to produce a status code.
    if [[ "$all_good" == "true" ]]; then
        return 0
    fi
    return 1
}
|
|
||||||
|
|
||||||
# Check web interface
#
# Polls the local Plex web UI, up to 5 attempts 2 seconds apart, until it
# answers with HTTP 200.
# Returns: 0 once the interface responds, 1 if every attempt fails.
check_web_interface() {
    print_header "WEB INTERFACE CHECK"

    local max_attempts=5
    local try
    for (( try = 1; try <= max_attempts; try++ )); do
        if curl -s -o /dev/null -w "%{http_code}" "http://localhost:32400/web/index.html" | grep -q "200"; then
            print_status "$GREEN" "✓ Web interface is accessible"
            print_status "$GREEN" "  URL: http://localhost:32400"
            return 0
        fi

        print_status "$YELLOW" "  Attempt $try/$max_attempts: Web interface not ready..."
        sleep 2
    done

    print_status "$RED" "✗ Web interface is not accessible"
    return 1
}
|
|
||||||
|
|
||||||
# Check API functionality
#
# Probes two unauthenticated Plex HTTP endpoints ('/' and '/identity')
# and reports whether they respond as expected.  Informational only:
# never fails the overall validation.
check_api_functionality() {
    print_header "API FUNCTIONALITY CHECK"

    # Test root API endpoint.  Declaration split from assignment so curl's
    # exit status is not masked by `local` (SC2155).
    local api_response
    api_response=$(curl -s "http://localhost:32400/" 2>/dev/null)

    if echo "$api_response" | grep -q "Unauthorized\|web/index.html"; then
        print_status "$GREEN" "✓ API is responding (redirect to web interface)"
    else
        print_status "$YELLOW" "  Warning: Unexpected API response"
    fi

    # Try to get server identity (this might work without auth)
    local identity_response
    identity_response=$(curl -s "http://localhost:32400/identity" 2>/dev/null)

    if echo "$identity_response" | grep -q "MediaContainer"; then
        print_status "$GREEN" "✓ Server identity endpoint working"
    else
        print_status "$YELLOW" "  Note: Server identity requires authentication"
    fi
}
|
|
||||||
|
|
||||||
# Check recent logs for errors
#
# Scans the last five minutes of plexmediaserver journal entries for
# error-like lines and echoes up to three of them.  Informational only.
check_recent_logs() {
    print_header "RECENT LOGS CHECK"

    # Declaration split from assignment so the pipeline status is not
    # masked by `local` (SC2155).  sudo is required to read the system
    # journal as an unprivileged user.
    local recent_errors
    recent_errors=$(sudo journalctl -u plexmediaserver --since "5 minutes ago" --no-pager -q 2>/dev/null | grep -i "error\|fail\|exception" | head -3)

    if [[ -z "$recent_errors" ]]; then
        print_status "$GREEN" "✓ No recent errors in service logs"
    else
        print_status "$YELLOW" "  Recent log entries found:"
        echo "$recent_errors" | while read -r line; do
            print_status "$YELLOW" "  $line"
        done
    fi
}
|
|
||||||
|
|
||||||
# Show recovery summary
#
# Prints a summary of the completed recovery actions plus a short
# checklist of recommended follow-up steps.
show_recovery_summary() {
    print_header "RECOVERY SUMMARY"

    local corrupted_backup_dir="${PLEX_DB_DIR}/corrupted-20250605_060232"
    if [[ -d "$corrupted_backup_dir" ]]; then
        print_status "$GREEN" "✓ Corrupted databases backed up to:"
        print_status "$GREEN" "  $corrupted_backup_dir"
    fi

    print_status "$GREEN" "✓ Databases restored from: 2025-06-02 backups"
    print_status "$GREEN" "✓ File ownership corrected to plex:plex"
    print_status "$GREEN" "✓ Service restarted successfully"

    echo
    print_status "$BLUE" "NEXT STEPS:"

    # Follow-up checklist, emitted in order.
    local -a next_steps=(
        "1. Access Plex at: http://localhost:32400"
        "2. Verify your libraries are intact"
        "3. Consider running a library scan to pick up recent changes"
        "4. Monitor the service for a few days to ensure stability"
    )
    local step
    for step in "${next_steps[@]}"; do
        print_status "$YELLOW" "$step"
    done
}
|
|
||||||
|
|
||||||
# Main function
#
# Runs every validation stage in order, tracking an overall pass/fail
# status across the critical checks (service, database, web interface).
# Returns: 0 when recovery looks fully successful, 1 otherwise.
main() {
    print_status "$BLUE" "PLEX RECOVERY VALIDATION"
    print_status "$BLUE" "$(date)"
    echo

    local overall_status=0

    # Critical checks: any failure downgrades the overall result.
    check_service_status || overall_status=1
    check_database_integrity || overall_status=1
    check_web_interface || overall_status=1

    # Informational checks: reported but never fatal.
    check_api_functionality
    check_recent_logs
    show_recovery_summary

    echo
    if (( overall_status == 0 )); then
        print_status "$GREEN" "🎉 RECOVERY SUCCESSFUL! Plex Media Server is fully functional."
    else
        print_status "$YELLOW" "⚠️  RECOVERY PARTIALLY SUCCESSFUL - Some issues detected."
        print_status "$YELLOW" "   Plex is running but may need additional attention."
    fi

    return "$overall_status"
}
|
|
||||||
|
|
||||||
# Run the validation: forward all CLI arguments to main and propagate
# its exit status as the script's own.
main "$@"
|
|
||||||
Reference in New Issue
Block a user