Mirror of https://github.com/acedanger/shell.git (synced 2025-12-06 04:30:13 -08:00)
feat: Add comprehensive Plex recovery validation script
- Introduced `validate-plex-recovery.sh` for validating Plex database recovery.
- Implemented checks for service status, database integrity, web interface accessibility, API functionality, and recent logs.
- Added detailed recovery summary and next steps for users.

fix: Improve Debian patching script for compatibility

- Enhanced `debian-patches.sh` to securely download and execute bootstrap scripts.
- Updated package mapping logic and ensured proper permissions for patched files.

fix: Update Docker test scripts for better permission handling

- Modified `run-docker-tests.sh` to set appropriate permissions on the logs directory.
- Ensured log files have correct permissions after test runs.

fix: Enhance setup scripts for secure installations

- Updated `setup.sh` to securely download and execute installation scripts for zoxide and nvm.
- Improved error handling for failed downloads.

fix: Refine startup script for log directory permissions

- Adjusted `startup.sh` to set proper permissions for log directories and files.

chore: Revamp update-containers.sh for better error handling and logging

- Rewrote `update-containers.sh` to include detailed logging and error handling.
- Added validation for Docker image names and improved overall script robustness.
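Several of the fixes above describe the same hardening pattern: download an installer or bootstrap script to a temporary file, fail loudly if the download breaks, and only then execute it with restricted permissions. The sketch below illustrates that pattern in isolation; it is not the code from `debian-patches.sh` or `setup.sh`, and the helper name and example URL are placeholders.

```bash
#!/bin/bash
# Illustrative sketch only -- placeholder helper name and URL, not the repository's scripts.
set -euo pipefail

run_remote_script() {
    local url="$1"
    local tmp
    tmp="$(mktemp)" || return 1

    # Download to a temp file instead of piping curl straight into bash;
    # -f makes curl fail on HTTP errors so a bad download is never executed.
    if ! curl -fsSL "$url" -o "$tmp"; then
        echo "ERROR: download failed: $url" >&2
        rm -f "$tmp"
        return 1
    fi

    chmod 700 "$tmp"    # owner-only permissions before execution

    if ! bash "$tmp"; then
        echo "ERROR: downloaded script exited with an error" >&2
        rm -f "$tmp"
        return 1
    fi

    rm -f "$tmp"
}

# Example (hypothetical URL): run_remote_script "https://example.com/bootstrap.sh"
```

The same fail-and-report shape applies to the log-permission and image-name checks the message mentions; those are not sketched here.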
@@ -1,5 +1,53 @@
#!/bin/bash

################################################################################
# Plex Backup System Comprehensive Test Suite
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Automated testing framework for the complete Plex backup
#              ecosystem, providing unit tests, integration tests, and
#              end-to-end validation of all backup operations.
#
# Features:
#   - Unit testing for individual backup components
#   - Integration testing for full backup workflows
#   - Database integrity test scenarios
#   - Service management testing
#   - Performance benchmarking
#   - Error condition simulation and recovery testing
#   - Test result reporting and analysis
#
# Related Scripts:
#   - backup-plex.sh: Primary script under test
#   - restore-plex.sh: Restoration testing component
#   - validate-plex-backups.sh: Validation testing
#   - monitor-plex-backup.sh: Monitoring system testing
#   - plex.sh: Service management testing
#
# Usage:
#   ./test-plex-backup.sh                 # Run full test suite
#   ./test-plex-backup.sh --unit          # Unit tests only
#   ./test-plex-backup.sh --integration   # Integration tests only
#   ./test-plex-backup.sh --quick         # Quick smoke tests
#   ./test-plex-backup.sh --cleanup       # Clean up test artifacts
#
# Dependencies:
#   - All Plex backup scripts in this directory
#   - sqlite3 or Plex SQLite binary
#   - jq (for JSON processing)
#   - tar (for archive operations)
#   - systemctl (for service testing)
#
# Exit Codes:
#   0 - All tests passed
#   1 - General error
#   2 - Test failures detected
#   3 - Missing dependencies
#   4 - Test setup failure
#
################################################################################

# Comprehensive Plex Backup System Test Suite
# This script provides automated testing for all backup-related functionality

@@ -59,10 +107,10 @@ log_warn() {
run_test() {
    local test_name="$1"
    local test_function="$2"

    TESTS_RUN=$((TESTS_RUN + 1))
    log_test "Running: $test_name"

    if $test_function; then
        log_pass "$test_name"
        record_test_result "$test_name" "PASS" ""
@@ -77,12 +125,12 @@ record_test_result() {
    local status="$2"
    local error_message="$3"
    local timestamp=$(date -Iseconds)

    # Initialize results file if it doesn't exist
    if [ ! -f "$TEST_RESULTS_FILE" ]; then
        echo "[]" > "$TEST_RESULTS_FILE"
    fi

    local result=$(jq -n \
        --arg test_name "$test_name" \
        --arg status "$status" \
@@ -94,7 +142,7 @@ record_test_result() {
            error_message: $error_message,
            timestamp: $timestamp
        }')

    jq --argjson result "$result" '. += [$result]' "$TEST_RESULTS_FILE" > "${TEST_RESULTS_FILE}.tmp" && \
        mv "${TEST_RESULTS_FILE}.tmp" "$TEST_RESULTS_FILE"
}
@@ -102,22 +150,22 @@ record_test_result() {
# Setup test environment
setup_test_environment() {
    log_info "Setting up test environment in $TEST_DIR"

    # Create test directories
    mkdir -p "$TEST_DIR"
    mkdir -p "$TEST_BACKUP_ROOT"
    mkdir -p "$TEST_LOG_ROOT"
    mkdir -p "$TEST_DIR/mock_plex"

    # Create mock Plex files for testing
    echo "PRAGMA user_version=1;" > "$TEST_DIR/mock_plex/com.plexapp.plugins.library.db"
    echo "PRAGMA user_version=1;" > "$TEST_DIR/mock_plex/com.plexapp.plugins.library.blobs.db"
    dd if=/dev/zero of="$TEST_DIR/mock_plex/Preferences.xml" bs=1024 count=1 2>/dev/null

    # Create mock performance log
    echo "[]" > "$TEST_DIR/mock-performance.json"
    echo "{}" > "$TEST_DIR/mock-backup.json"

    log_info "Test environment setup complete"
}

@@ -152,15 +200,15 @@ mock_verify_backup() {
# Test: JSON log initialization
test_json_log_initialization() {
    local test_log="$TEST_DIR/test-init.json"

    # Remove file if it exists
    rm -f "$test_log"

    # Test initialization
    if [ ! -f "$test_log" ] || ! jq empty "$test_log" 2>/dev/null; then
        echo "{}" > "$test_log"
    fi

    # Verify file exists and is valid JSON
    if [ -f "$test_log" ] && jq empty "$test_log" 2>/dev/null; then
        return 0
@@ -173,14 +221,14 @@ test_json_log_initialization() {
test_performance_tracking() {
    local test_perf_log="$TEST_DIR/test-performance.json"
    echo "[]" > "$test_perf_log"

    # Mock performance tracking function
    track_performance_test() {
        local operation="$1"
        local start_time="$2"
        local end_time=$(date +%s)
        local duration=$((end_time - start_time))

        local entry=$(jq -n \
            --arg operation "$operation" \
            --arg duration "$duration" \
@@ -190,16 +238,16 @@ test_performance_tracking() {
                duration_seconds: ($duration | tonumber),
                timestamp: $timestamp
            }')

        jq --argjson entry "$entry" '. += [$entry]' "$test_perf_log" > "${test_perf_log}.tmp" && \
            mv "${test_perf_log}.tmp" "$test_perf_log"
    }

    # Test tracking
    local start_time=$(date +%s)
    sleep 1 # Simulate work
    track_performance_test "test_operation" "$start_time"

    # Verify entry was added
    local entry_count=$(jq length "$test_perf_log")
    if [ "$entry_count" -eq 1 ]; then
@@ -216,7 +264,7 @@ test_notification_system() {
        local title="$1"
        local message="$2"
        local status="${3:-info}"

        # Just verify parameters are received correctly
        if [ -n "$title" ] && [ -n "$message" ]; then
            echo "Notification: $title - $message ($status)" > "$TEST_DIR/notification.log"
@@ -225,10 +273,10 @@ test_notification_system() {
            return 1
        fi
    }

    # Test notification
    send_notification_test "Test Title" "Test Message" "success"

    # Verify notification was processed
    if [ -f "$TEST_DIR/notification.log" ] && grep -q "Test Title" "$TEST_DIR/notification.log"; then
        return 0
@@ -241,16 +289,16 @@ test_notification_system() {
test_checksum_caching() {
    local test_file="$TEST_DIR/checksum_test.txt"
    local cache_file="${test_file}.md5"

    # Create test file
    echo "test content" > "$test_file"

    # Mock checksum function with caching
    calculate_checksum_test() {
        local file="$1"
        local cache_file="${file}.md5"
        local file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")

        # Check cache
        if [ -f "$cache_file" ]; then
            local cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
@@ -259,19 +307,19 @@ test_checksum_caching() {
                return 0
            fi
        fi

        # Calculate and cache
        local checksum=$(md5sum "$file" | cut -d' ' -f1)
        echo "$checksum" > "$cache_file"
        echo "$checksum"
    }

    # First calculation (should create cache)
    local checksum1=$(calculate_checksum_test "$test_file")

    # Second calculation (should use cache)
    local checksum2=$(calculate_checksum_test "$test_file")

    # Verify checksums match and cache file exists
    if [ "$checksum1" = "$checksum2" ] && [ -f "$cache_file" ]; then
        return 0
@@ -284,26 +332,26 @@ test_checksum_caching() {
test_backup_verification() {
    local src_file="$TEST_DIR/source.txt"
    local dest_file="$TEST_DIR/backup.txt"

    # Create identical files
    echo "backup test content" > "$src_file"
    cp "$src_file" "$dest_file"

    # Mock verification function
    verify_backup_test() {
        local src="$1"
        local dest="$2"

        local src_checksum=$(md5sum "$src" | cut -d' ' -f1)
        local dest_checksum=$(md5sum "$dest" | cut -d' ' -f1)

        if [ "$src_checksum" = "$dest_checksum" ]; then
            return 0
        else
            return 1
        fi
    }

    # Test verification
    if verify_backup_test "$src_file" "$dest_file"; then
        return 0
@@ -318,7 +366,7 @@ test_parallel_processing() {
    local -a pids=()
    local total_jobs=5
    local completed_jobs=0

    # Simulate parallel jobs
    for i in $(seq 1 $total_jobs); do
        (
@@ -328,20 +376,20 @@ test_parallel_processing() {
        ) &
        pids+=($!)
    done

    # Wait for all jobs
    for pid in "${pids[@]}"; do
        if wait "$pid"; then
            completed_jobs=$((completed_jobs + 1))
        fi
    done

    # Verify all jobs completed
    local result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)

    # Cleanup
    rm -rf "$temp_dir"

    if [ "$completed_jobs" -eq "$total_jobs" ] && [ "$result_files" -eq "$total_jobs" ]; then
        return 0
    else
@@ -352,25 +400,25 @@ test_parallel_processing() {
# Test: Database integrity check simulation
test_database_integrity() {
    local test_db="$TEST_DIR/test.db"

    # Create a simple SQLite database
    sqlite3 "$test_db" "CREATE TABLE test (id INTEGER, name TEXT);"
    sqlite3 "$test_db" "INSERT INTO test VALUES (1, 'test');"

    # Mock integrity check
    check_integrity_test() {
        local db_file="$1"

        # Use sqlite3 instead of Plex SQLite for testing
        local result=$(sqlite3 "$db_file" "PRAGMA integrity_check;" 2>/dev/null)

        if echo "$result" | grep -q "ok"; then
            return 0
        else
            return 1
        fi
    }

    # Test integrity check
    if check_integrity_test "$test_db"; then
        return 0
@@ -387,7 +435,7 @@ test_configuration_parsing() {
        local auto_repair=false
        local parallel=true
        local webhook=""

        for arg in "${args[@]}"; do
            case "$arg" in
                --auto-repair) auto_repair=true ;;
@@ -395,14 +443,14 @@ test_configuration_parsing() {
                --webhook=*) webhook="${arg#*=}" ;;
            esac
        done

        # Return parsed values
        echo "$auto_repair $parallel $webhook"
    }

    # Test parsing
    local result=$(parse_args_test --auto-repair --webhook=http://example.com)

    if echo "$result" | grep -q "true true http://example.com"; then
        return 0
    else
@@ -415,14 +463,14 @@ test_error_handling() {
    # Mock function that can fail
    test_function_with_error() {
        local should_fail="$1"

        if [ "$should_fail" = "true" ]; then
            return 1
        else
            return 0
        fi
    }

    # Test success case
    if test_function_with_error "false"; then
        # Test failure case
@@ -430,7 +478,7 @@ test_error_handling() {
            return 0 # Both cases worked as expected
        fi
    fi

    return 1
}

@@ -438,9 +486,9 @@ test_error_handling() {
run_all_tests() {
    log_info "Setting up test environment"
    setup_test_environment

    log_info "Starting unit tests"

    # Core functionality tests
    run_test "JSON Log Initialization" test_json_log_initialization
    run_test "Performance Tracking" test_performance_tracking
@@ -451,7 +499,7 @@ run_all_tests() {
    run_test "Database Integrity Check" test_database_integrity
    run_test "Configuration Parsing" test_configuration_parsing
    run_test "Error Handling" test_error_handling

    log_info "Unit tests completed"
}

@@ -459,13 +507,13 @@ run_all_tests() {
run_integration_tests() {
    log_info "Starting integration tests"
    log_warn "Integration tests require a working Plex installation"

    # Check if Plex service exists
    if ! systemctl list-units --all | grep -q plexmediaserver; then
        log_warn "Plex service not found - skipping integration tests"
        return 0
    fi

    # Test actual service management (if safe to do so)
    log_info "Integration tests would test actual Plex service management"
    log_info "Skipping for safety - implement with caution"
@@ -474,30 +522,30 @@ run_integration_tests() {
# Run performance tests
run_performance_tests() {
    log_info "Starting performance benchmarks"

    local start_time=$(date +%s)

    # Test file operations
    local test_file="$TEST_DIR/perf_test.dat"
    dd if=/dev/zero of="$test_file" bs=1M count=10 2>/dev/null

    # Benchmark checksum calculation
    local checksum_start=$(date +%s)
    md5sum "$test_file" > /dev/null
    local checksum_time=$(($(date +%s) - checksum_start))

    # Benchmark compression
    local compress_start=$(date +%s)
    tar -czf "$TEST_DIR/perf_test.tar.gz" -C "$TEST_DIR" "perf_test.dat"
    local compress_time=$(($(date +%s) - compress_start))

    local total_time=$(($(date +%s) - start_time))

    log_info "Performance Results:"
    log_info " Checksum (10MB): ${checksum_time}s"
    log_info " Compression (10MB): ${compress_time}s"
    log_info " Total benchmark time: ${total_time}s"

    # Record performance data
    local perf_entry=$(jq -n \
        --arg checksum_time "$checksum_time" \
@@ -511,14 +559,14 @@ run_performance_tests() {
            total_time_seconds: ($total_time | tonumber),
            timestamp: $timestamp
        }')

    echo "$perf_entry" > "$TEST_DIR/performance_results.json"
}

# Generate comprehensive test report
generate_test_report() {
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')

    echo
    echo "=============================================="
    echo " PLEX BACKUP TEST REPORT"
@@ -528,7 +576,7 @@ generate_test_report() {
    echo "Tests Passed: $TESTS_PASSED"
    echo "Tests Failed: $TESTS_FAILED"
    echo

    if [ $TESTS_FAILED -gt 0 ]; then
        echo "FAILED TESTS:"
        for failed_test in "${FAILED_TESTS[@]}"; do
@@ -536,21 +584,21 @@ generate_test_report() {
        done
        echo
    fi

    local success_rate=0
    if [ $TESTS_RUN -gt 0 ]; then
        success_rate=$(( (TESTS_PASSED * 100) / TESTS_RUN ))
    fi

    echo "Success Rate: ${success_rate}%"
    echo

    if [ $TESTS_FAILED -eq 0 ]; then
        log_pass "All tests passed successfully!"
    else
        log_fail "Some tests failed - review output above"
    fi

    # Save detailed results
    if [ -f "$TEST_RESULTS_FILE" ]; then
        local report_file="$TEST_DIR/test_report_$(date +%Y%m%d_%H%M%S).json"
@@ -573,7 +621,7 @@ generate_test_report() {
            failed_tests: $failed_tests,
            detailed_results: $test_details
        }' > "$report_file"

        log_info "Detailed test report saved to: $report_file"
    fi
}
@@ -581,10 +629,10 @@ generate_test_report() {
# Integration tests (if requested)
run_integration_tests() {
    log_info "Running integration tests..."

    # Note: These would require actual Plex installation
    # For now, we'll just indicate what would be tested

    log_warn "Integration tests require running Plex Media Server"
    log_warn "These tests would cover:"
    log_warn " - Service stop/start functionality"
@@ -596,27 +644,27 @@ run_integration_tests() {
# Performance benchmarks
run_performance_tests() {
    log_info "Running performance benchmarks..."

    local start_time=$(date +%s)

    # Create large test files
    local large_file="$TEST_DIR/large_test.db"
    dd if=/dev/zero of="$large_file" bs=1M count=100 2>/dev/null

    # Benchmark checksum calculation
    local checksum_start=$(date +%s)
    md5sum "$large_file" > /dev/null
    local checksum_end=$(date +%s)
    local checksum_time=$((checksum_end - checksum_start))

    # Benchmark compression
    local compress_start=$(date +%s)
    tar -czf "$TEST_DIR/large_test.tar.gz" -C "$TEST_DIR" "large_test.db"
    local compress_end=$(date +%s)
    local compress_time=$((compress_end - compress_start))

    local total_time=$(($(date +%s) - start_time))

    log_info "Performance Results:"
    log_info " Checksum (100MB): ${checksum_time}s"
    log_info " Compression (100MB): ${compress_time}s"
@@ -650,9 +698,9 @@ main() {
            exit 1
            ;;
    esac

    generate_test_report

    # Exit with appropriate code
    if [ $TESTS_FAILED -gt 0 ]; then
        exit 1