Mirror of https://github.com/acedanger/shell.git (synced 2025-12-06 00:00:13 -08:00)

Latest commit:
- Changed inline variable assignments to separate declaration and assignment for clarity.
- Updated condition checks and log messages for better readability and consistency.
- Added a backup of validate-plex-recovery.sh for safety.
- Introduced a new script run-docker-tests.sh for testing setup in Docker containers.
- Enhanced ssh-login.sh to improve condition checks and logging functionality.

748 lines
20 KiB
Bash
Executable File
#!/bin/bash

################################################################################
# Plex Backup System Comprehensive Test Suite
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Automated testing framework for the complete Plex backup
#              ecosystem, providing unit tests, integration tests, and
#              end-to-end validation of all backup operations.
#
# Features:
#   - Unit testing for individual backup components
#   - Integration testing for full backup workflows
#   - Database integrity test scenarios
#   - Service management testing
#   - Performance benchmarking
#   - Error condition simulation and recovery testing
#   - Test result reporting and analysis
#
# Related Scripts:
#   - backup-plex.sh: Primary script under test
#   - restore-plex.sh: Restoration testing component
#   - validate-plex-backups.sh: Validation testing
#   - monitor-plex-backup.sh: Monitoring system testing
#   - plex.sh: Service management testing
#
# Usage:
#   ./test-plex-backup.sh                # Run all available tests
#   ./test-plex-backup.sh unit           # Unit tests only
#   ./test-plex-backup.sh integration    # Integration tests (requires Plex)
#   ./test-plex-backup.sh performance    # Performance benchmarks only
#
# Dependencies:
#   - All Plex backup scripts in this directory
#   - sqlite3 or Plex SQLite binary
#   - jq (for JSON processing)
#   - tar (for archive operations)
#   - systemctl (for service testing)
#
# Exit Codes:
#   0 - All tests passed
#   1 - General error
#   2 - Test failures detected
#   3 - Missing dependencies
#   4 - Test setup failure
#
################################################################################

set -e

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Test configuration
TEST_DIR="/tmp/plex-backup-test-$(date +%s)"
TEST_BACKUP_ROOT="$TEST_DIR/backups"
TEST_LOG_ROOT="$TEST_DIR/logs"
TEST_RESULTS_FILE="$TEST_DIR/test-results.json"

# Test counters
TESTS_RUN=0
TESTS_PASSED=0
TESTS_FAILED=0
declare -a FAILED_TESTS=()

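# ------------------------------------------------------------------------------
# Example: dependency check (illustrative sketch, not called by default).
# The header documents exit code 3 for missing dependencies, but no check is
# wired in yet; something along these lines could enforce it. The tool list
# mirrors the Dependencies section above (systemctl is only needed for the
# integration tests).
# ------------------------------------------------------------------------------
check_dependencies_example() {
    local -a missing=()
    local tool
    for tool in jq tar md5sum sqlite3; do
        if ! command -v "$tool" >/dev/null 2>&1; then
            missing+=("$tool")
        fi
    done

    if [ ${#missing[@]} -gt 0 ]; then
        echo "Missing required tools: ${missing[*]}" >&2
        return 3
    fi
    return 0
}
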
# Logging functions
log_test() {
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${CYAN}[TEST ${timestamp}]${NC} $1"
}

log_pass() {
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
    TESTS_PASSED=$((TESTS_PASSED + 1))
}

log_fail() {
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
    TESTS_FAILED=$((TESTS_FAILED + 1))
    FAILED_TESTS+=("$1")
}

log_info() {
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
}

log_warn() {
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
}

# Test framework functions
run_test() {
    local test_name="$1"
    local test_function="$2"

    TESTS_RUN=$((TESTS_RUN + 1))
    log_test "Running: $test_name"

    if $test_function; then
        log_pass "$test_name"
        record_test_result "$test_name" "PASS" ""
    else
        log_fail "$test_name"
        record_test_result "$test_name" "FAIL" "Test function returned non-zero exit code"
    fi
}

record_test_result() {
    local test_name="$1"
    local status="$2"
    local error_message="$3"
    local timestamp
    timestamp=$(date -Iseconds)

    # Initialize results file if it doesn't exist
    if [ ! -f "$TEST_RESULTS_FILE" ]; then
        echo "[]" > "$TEST_RESULTS_FILE"
    fi

    local result
    result=$(jq -n \
        --arg test_name "$test_name" \
        --arg status "$status" \
        --arg error_message "$error_message" \
        --arg timestamp "$timestamp" \
        '{
            test_name: $test_name,
            status: $status,
            error_message: $error_message,
            timestamp: $timestamp
        }')

    jq --argjson result "$result" '. += [$result]' "$TEST_RESULTS_FILE" > "${TEST_RESULTS_FILE}.tmp" && \
        mv "${TEST_RESULTS_FILE}.tmp" "$TEST_RESULTS_FILE"
}

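# ------------------------------------------------------------------------------
# Example: adding a new test case (illustrative; not registered in the suite).
# A test is just a function that returns 0 on success and non-zero on failure;
# run_test handles counting, logging, and JSON result recording.
# ------------------------------------------------------------------------------
test_example_tar_roundtrip() {
    local src="$TEST_DIR/example.txt"
    local archive="$TEST_DIR/example.tar.gz"

    echo "roundtrip" > "$src"
    tar -czf "$archive" -C "$TEST_DIR" "example.txt"

    # The archive should exist and contain the file we added
    if [ -f "$archive" ] && tar -tzf "$archive" | grep -q "example.txt"; then
        return 0
    fi
    return 1
}
# It would then be registered alongside the other tests in run_all_tests:
#   run_test "Tar Roundtrip (example)" test_example_tar_roundtrip
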
# Setup test environment
setup_test_environment() {
    log_info "Setting up test environment in $TEST_DIR"

    # Create test directories
    mkdir -p "$TEST_DIR"
    mkdir -p "$TEST_BACKUP_ROOT"
    mkdir -p "$TEST_LOG_ROOT"
    mkdir -p "$TEST_DIR/mock_plex"

    # Create mock Plex files for testing
    echo "PRAGMA user_version=1;" > "$TEST_DIR/mock_plex/com.plexapp.plugins.library.db"
    echo "PRAGMA user_version=1;" > "$TEST_DIR/mock_plex/com.plexapp.plugins.library.blobs.db"
    dd if=/dev/zero of="$TEST_DIR/mock_plex/Preferences.xml" bs=1024 count=1 2>/dev/null

    # Create mock performance log
    echo "[]" > "$TEST_DIR/mock-performance.json"
    echo "{}" > "$TEST_DIR/mock-backup.json"

    log_info "Test environment setup complete"
}

# Cleanup test environment
cleanup_test_environment() {
    if [ -d "$TEST_DIR" ]; then
        log_info "Cleaning up test environment"
        rm -rf "$TEST_DIR"
    fi
}

# Mock functions to replace actual backup script functions
mock_manage_plex_service() {
    local action="$1"
    echo "Mock: Plex service $action"
    return 0
}

mock_calculate_checksum() {
    local file="$1"
    echo "$file" | md5sum | cut -d' ' -f1
    return 0
}

mock_verify_backup() {
    local src="$1"
    local dest="$2"
    # Always return success for testing
    return 0
}

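# ------------------------------------------------------------------------------
# Example usage of the mocks above (illustrative): a test can shadow the real
# backup-plex.sh function names with these mocks by defining thin wrappers,
# e.g. inside a test function:
#
#   manage_plex_service() { mock_manage_plex_service "$@"; }
#   verify_backup()       { mock_verify_backup "$@"; }
#
# The real function names here are assumed from the mock names; adjust them to
# match backup-plex.sh.
# ------------------------------------------------------------------------------
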
# Test: JSON log initialization
test_json_log_initialization() {
    local test_log="$TEST_DIR/test-init.json"

    # Remove file if it exists
    rm -f "$test_log"

    # Test initialization
    if [ ! -f "$test_log" ] || ! jq empty "$test_log" 2>/dev/null; then
        echo "{}" > "$test_log"
    fi

    # Verify file exists and is valid JSON
    if [ -f "$test_log" ] && jq empty "$test_log" 2>/dev/null; then
        return 0
    else
        return 1
    fi
}

# Test: Performance tracking
test_performance_tracking() {
    local test_perf_log="$TEST_DIR/test-performance.json"
    echo "[]" > "$test_perf_log"

    # Mock performance tracking function
    track_performance_test() {
        local operation="$1"
        local start_time="$2"
        local end_time
        end_time=$(date +%s)
        local duration=$((end_time - start_time))

        local entry
        entry=$(jq -n \
            --arg operation "$operation" \
            --arg duration "$duration" \
            --arg timestamp "$(date -Iseconds)" \
            '{
                operation: $operation,
                duration_seconds: ($duration | tonumber),
                timestamp: $timestamp
            }')

        jq --argjson entry "$entry" '. += [$entry]' "$test_perf_log" > "${test_perf_log}.tmp" && \
            mv "${test_perf_log}.tmp" "$test_perf_log"
    }

    # Test tracking
    local start_time
    start_time=$(date +%s)
    sleep 1 # Simulate work
    track_performance_test "test_operation" "$start_time"

    # Verify entry was added
    local entry_count
    entry_count=$(jq length "$test_perf_log")
    if [ "$entry_count" -eq 1 ]; then
        return 0
    else
        return 1
    fi
}

# Test: Notification system
test_notification_system() {
    # Mock notification function
    send_notification_test() {
        local title="$1"
        local message="$2"
        local status="${3:-info}"

        # Just verify parameters are received correctly
        if [ -n "$title" ] && [ -n "$message" ]; then
            echo "Notification: $title - $message ($status)" > "$TEST_DIR/notification.log"
            return 0
        else
            return 1
        fi
    }

    # Test notification
    send_notification_test "Test Title" "Test Message" "success"

    # Verify notification was processed
    if [ -f "$TEST_DIR/notification.log" ] && grep -q "Test Title" "$TEST_DIR/notification.log"; then
        return 0
    else
        return 1
    fi
}

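# ------------------------------------------------------------------------------
# Sketch: what a real (non-mock) notifier might look like. Illustrative only --
# the WEBHOOK_URL variable, payload shape, and use of curl are assumptions here,
# not the actual backup-plex.sh implementation.
# ------------------------------------------------------------------------------
send_notification_webhook_example() {
    local title="$1"
    local message="$2"
    local status="${3:-info}"
    local webhook_url="${WEBHOOK_URL:-}"   # hypothetical configuration variable

    # Without a configured webhook, fall back to logging only
    if [ -z "$webhook_url" ]; then
        echo "Notification ($status): $title - $message"
        return 0
    fi

    # Post a small JSON payload built with jq (already a listed dependency);
    # curl is assumed to be available
    jq -n --arg title "$title" --arg message "$message" --arg status "$status" \
        '{title: $title, message: $message, status: $status}' |
        curl -fsS -X POST -H "Content-Type: application/json" -d @- "$webhook_url" >/dev/null
}
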
# Test: Checksum caching
test_checksum_caching() {
    local test_file="$TEST_DIR/checksum_test.txt"
    local cache_file="${test_file}.md5"

    # Create test file
    echo "test content" > "$test_file"

    # Mock checksum function with caching
    calculate_checksum_test() {
        local file="$1"
        local cache_file="${file}.md5"
        local file_mtime
        file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")

        # Check cache
        if [ -f "$cache_file" ]; then
            local cache_mtime
            cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
            if [ "$cache_mtime" -gt "$file_mtime" ]; then
                cat "$cache_file"
                return 0
            fi
        fi

        # Calculate and cache
        local checksum
        checksum=$(md5sum "$file" | cut -d' ' -f1)
        echo "$checksum" > "$cache_file"
        echo "$checksum"
    }

    # First calculation (should create cache)
    local checksum1
    checksum1=$(calculate_checksum_test "$test_file")

    # Second calculation (should use cache)
    local checksum2
    checksum2=$(calculate_checksum_test "$test_file")

    # Verify checksums match and cache file exists
    if [ "$checksum1" = "$checksum2" ] && [ -f "$cache_file" ]; then
        return 0
    else
        return 1
    fi
}

# Test: Backup verification
test_backup_verification() {
    local src_file="$TEST_DIR/source.txt"
    local dest_file="$TEST_DIR/backup.txt"

    # Create identical files
    echo "backup test content" > "$src_file"
    cp "$src_file" "$dest_file"

    # Mock verification function
    verify_backup_test() {
        local src="$1"
        local dest="$2"

        local src_checksum
        src_checksum=$(md5sum "$src" | cut -d' ' -f1)
        local dest_checksum
        dest_checksum=$(md5sum "$dest" | cut -d' ' -f1)

        if [ "$src_checksum" = "$dest_checksum" ]; then
            return 0
        else
            return 1
        fi
    }

    # Test verification
    if verify_backup_test "$src_file" "$dest_file"; then
        return 0
    else
        return 1
    fi
}

# Test: Parallel processing framework
test_parallel_processing() {
    local temp_dir
    temp_dir=$(mktemp -d)
    local -a pids=()
    local total_jobs=5
    local completed_jobs=0

    # Simulate parallel jobs
    for i in $(seq 1 "$total_jobs"); do
        (
            # Simulate work
            sleep "0.$i"
            echo "$i" > "$temp_dir/job_$i.result"
        ) &
        pids+=($!)
    done

    # Wait for all jobs
    for pid in "${pids[@]}"; do
        if wait "$pid"; then
            completed_jobs=$((completed_jobs + 1))
        fi
    done

    # Verify all jobs completed
    local result_files
    result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)

    # Cleanup
    rm -rf "$temp_dir"

    if [ "$completed_jobs" -eq "$total_jobs" ] && [ "$result_files" -eq "$total_jobs" ]; then
        return 0
    else
        return 1
    fi
}

# Test: Database integrity check simulation
test_database_integrity() {
    local test_db="$TEST_DIR/test.db"

    # Create a simple SQLite database
    sqlite3 "$test_db" "CREATE TABLE test (id INTEGER, name TEXT);"
    sqlite3 "$test_db" "INSERT INTO test VALUES (1, 'test');"

    # Mock integrity check
    check_integrity_test() {
        local db_file="$1"

        # Use sqlite3 instead of Plex SQLite for testing
        local result
        result=$(sqlite3 "$db_file" "PRAGMA integrity_check;" 2>/dev/null)

        if echo "$result" | grep -q "ok"; then
            return 0
        else
            return 1
        fi
    }

    # Test integrity check
    if check_integrity_test "$test_db"; then
        return 0
    else
        return 1
    fi
}

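# ------------------------------------------------------------------------------
# Sketch: the production check would use Plex's bundled "Plex SQLite" binary
# rather than the system sqlite3, since Plex databases rely on a custom
# tokenizer. The path below is the usual Linux package location, but treat both
# the path and this helper as assumptions, not the backup-plex.sh implementation.
# ------------------------------------------------------------------------------
check_integrity_plex_sqlite_example() {
    local db_file="$1"
    local plex_sqlite="/usr/lib/plexmediaserver/Plex SQLite"   # assumed default path

    if [ ! -x "$plex_sqlite" ]; then
        echo "Plex SQLite binary not found at: $plex_sqlite" >&2
        return 1
    fi

    "$plex_sqlite" "$db_file" "PRAGMA integrity_check;" | grep -q "^ok$"
}
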
# Test: Configuration parsing
test_configuration_parsing() {
    # Mock command line parsing
    parse_args_test() {
        local args=("$@")
        local auto_repair=false
        local parallel=true
        local webhook=""

        for arg in "${args[@]}"; do
            case "$arg" in
                --auto-repair) auto_repair=true ;;
                --no-parallel) parallel=false ;;
                --webhook=*) webhook="${arg#*=}" ;;
            esac
        done

        # Return parsed values
        echo "$auto_repair $parallel $webhook"
    }

    # Test parsing
    local result
    result=$(parse_args_test --auto-repair --webhook=http://example.com)

    if echo "$result" | grep -q "true true http://example.com"; then
        return 0
    else
        return 1
    fi
}

# Test: Error handling
test_error_handling() {
    # Mock function that can fail
    test_function_with_error() {
        local should_fail="$1"

        if [ "$should_fail" = "true" ]; then
            return 1
        else
            return 0
        fi
    }

    # Test success case
    if test_function_with_error "false"; then
        # Test failure case
        if ! test_function_with_error "true"; then
            return 0 # Both cases worked as expected
        fi
    fi

    return 1
}

# Run all unit tests
run_all_tests() {
    log_info "Setting up test environment"
    setup_test_environment

    log_info "Starting unit tests"

    # Core functionality tests
    run_test "JSON Log Initialization" test_json_log_initialization
    run_test "Performance Tracking" test_performance_tracking
    run_test "Notification System" test_notification_system
    run_test "Checksum Caching" test_checksum_caching
    run_test "Backup Verification" test_backup_verification
    run_test "Parallel Processing" test_parallel_processing
    run_test "Database Integrity Check" test_database_integrity
    run_test "Configuration Parsing" test_configuration_parsing
    run_test "Error Handling" test_error_handling

    log_info "Unit tests completed"
}

# Run integration tests (requires actual Plex environment)
run_integration_tests() {
    log_info "Starting integration tests"
    log_warn "Integration tests require a working Plex installation"

    # Check if Plex service exists
    if ! systemctl list-units --all | grep -q plexmediaserver; then
        log_warn "Plex service not found - skipping integration tests"
        return 0
    fi

    # Test actual service management (if safe to do so)
    log_info "Integration tests would test actual Plex service management"
    log_warn "These tests would cover:"
    log_warn "  - Service stop/start functionality"
    log_warn "  - Database integrity checks"
    log_warn "  - Full backup and restore cycles"
    log_warn "  - Performance under load"
    log_info "Skipping for safety - implement with caution"
}

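# ------------------------------------------------------------------------------
# Sketch: one possible integration check -- confirming the Plex service is in a
# known state before exercising backups. Read-only (status query only); an
# actual stop/start test would need the safety considerations noted above.
# Illustrative and not called by run_integration_tests.
# ------------------------------------------------------------------------------
test_plex_service_state_example() {
    # systemctl is-active exits 0 when the unit is active
    if systemctl is-active --quiet plexmediaserver; then
        log_info "plexmediaserver is active"
        return 0
    fi

    log_warn "plexmediaserver is not active - integration tests assume a running server"
    return 1
}
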
# Run performance tests
run_performance_tests() {
    log_info "Starting performance benchmarks"

    # Ensure the test directory exists when this mode is run on its own
    mkdir -p "$TEST_DIR"

    local start_time
    start_time=$(date +%s)

    # Create a large test file to benchmark against
    local test_file="$TEST_DIR/perf_test.dat"
    dd if=/dev/zero of="$test_file" bs=1M count=100 2>/dev/null

    # Benchmark checksum calculation
    local checksum_start
    checksum_start=$(date +%s)
    md5sum "$test_file" > /dev/null
    local checksum_time=$(($(date +%s) - checksum_start))

    # Benchmark compression
    local compress_start
    compress_start=$(date +%s)
    tar -czf "$TEST_DIR/perf_test.tar.gz" -C "$TEST_DIR" "perf_test.dat"
    local compress_time=$(($(date +%s) - compress_start))

    local total_time=$(($(date +%s) - start_time))

    log_info "Performance Results:"
    log_info "  Checksum (100MB): ${checksum_time}s"
    log_info "  Compression (100MB): ${compress_time}s"
    log_info "  Total benchmark time: ${total_time}s"

    # Record performance data
    local perf_entry
    perf_entry=$(jq -n \
        --arg checksum_time "$checksum_time" \
        --arg compress_time "$compress_time" \
        --arg total_time "$total_time" \
        --arg timestamp "$(date -Iseconds)" \
        '{
            benchmark: "performance_test",
            checksum_time_seconds: ($checksum_time | tonumber),
            compress_time_seconds: ($compress_time | tonumber),
            total_time_seconds: ($total_time | tonumber),
            timestamp: $timestamp
        }')

    echo "$perf_entry" > "$TEST_DIR/performance_results.json"
}

# Generate comprehensive test report
generate_test_report() {
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')

    echo
    echo "=============================================="
    echo "          PLEX BACKUP TEST REPORT"
    echo "=============================================="
    echo "Test Run: $timestamp"
    echo "Tests Run: $TESTS_RUN"
    echo "Tests Passed: $TESTS_PASSED"
    echo "Tests Failed: $TESTS_FAILED"
    echo

    if [ $TESTS_FAILED -gt 0 ]; then
        echo "FAILED TESTS:"
        for failed_test in "${FAILED_TESTS[@]}"; do
            echo "  - $failed_test"
        done
        echo
    fi

    local success_rate=0
    if [ $TESTS_RUN -gt 0 ]; then
        success_rate=$(( (TESTS_PASSED * 100) / TESTS_RUN ))
    fi

    echo "Success Rate: ${success_rate}%"
    echo

    # Save detailed results before the final log_pass/log_fail call below,
    # which adjusts the counters and would otherwise skew the JSON summary
    if [ -f "$TEST_RESULTS_FILE" ]; then
        local report_file
        report_file="$TEST_DIR/test_report_$(date +%Y%m%d_%H%M%S).json"
        jq -n \
            --arg timestamp "$timestamp" \
            --arg tests_run "$TESTS_RUN" \
            --arg tests_passed "$TESTS_PASSED" \
            --arg tests_failed "$TESTS_FAILED" \
            --arg success_rate "$success_rate" \
            --argjson failed_tests "$(printf '%s\n' "${FAILED_TESTS[@]}" | jq -R . | jq -s .)" \
            --argjson test_details "$(cat "$TEST_RESULTS_FILE")" \
            '{
                test_run_timestamp: $timestamp,
                summary: {
                    tests_run: ($tests_run | tonumber),
                    tests_passed: ($tests_passed | tonumber),
                    tests_failed: ($tests_failed | tonumber),
                    success_rate_percent: ($success_rate | tonumber)
                },
                failed_tests: $failed_tests,
                detailed_results: $test_details
            }' > "$report_file"

        log_info "Detailed test report saved to: $report_file"
    fi

    if [ $TESTS_FAILED -eq 0 ]; then
        log_pass "All tests passed successfully!"
    else
        log_fail "Some tests failed - review output above"
    fi
}

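# ------------------------------------------------------------------------------
# Example: inspecting a saved report with jq (illustrative commands). Note that
# the EXIT trap below removes $TEST_DIR, so copy the report elsewhere first if
# you want to keep it after the run.
#
#   jq '.summary' test_report_YYYYMMDD_HHMMSS.json
#   jq -r '.detailed_results[] | select(.status == "FAIL") | .test_name' \
#       test_report_YYYYMMDD_HHMMSS.json
# ------------------------------------------------------------------------------
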
# Main execution
main() {
    case "${1:-all}" in
        "unit")
            run_all_tests
            ;;
        "integration")
            run_integration_tests
            ;;
        "performance")
            run_performance_tests
            ;;
        "all")
            run_all_tests
            # Uncomment for integration tests if environment supports it
            # run_integration_tests
            run_performance_tests
            ;;
        *)
            echo "Usage: $0 [unit|integration|performance|all]"
            echo "  unit        - Run unit tests only"
            echo "  integration - Run integration tests (requires Plex)"
            echo "  performance - Run performance benchmarks"
            echo "  all         - Run all available tests"
            exit 1
            ;;
    esac

    generate_test_report

    # Exit with appropriate code (2 = test failures detected, per header)
    if [ $TESTS_FAILED -gt 0 ]; then
        exit 2
    else
        exit 0
    fi
}

# Trap to ensure cleanup on exit
trap cleanup_test_environment EXIT

main "$@"