Mirror of https://github.com/acedanger/shell.git (synced 2025-12-06 01:10:12 -08:00)
- Created a centralized database repair script (`plex-database-repair.sh`) to handle all database integrity checks and repairs for Plex Media Server.
- Updated the main Plex management script (`plex.sh`) to integrate the new repair functionality and fixed Unicode/ASCII display issues.
- Refactored the backup script (`backup-plex.sh`) to remove duplicate repair functions and ensure it utilizes the new repair script.
- Conducted thorough code validation and functional testing to ensure all scripts operate correctly with the new changes.
- Enhanced documentation for the new repair script, detailing usage, features, and integration points with other scripts.
- Fixed critical bugs related to WAL file handling and corrected typos in script options.
650 lines · 22 KiB · Bash · Executable File
#!/bin/bash

################################################################################
# Plex Database Repair Utility
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Shared database repair functionality for Plex Media Server
#              Extracted from backup-plex.sh to be reusable across scripts
#
# Features:
#   - Database integrity verification with automatic repair
#   - WAL (Write-Ahead Logging) file handling
#   - Multiple repair strategies (dump/restore, schema recreation, backup recovery)
#   - Comprehensive error handling and recovery
#
# Usage:
#   ./plex-database-repair.sh check <database_file>        # Check integrity only
#   ./plex-database-repair.sh repair <database_file>       # Attempt repair
#   ./plex-database-repair.sh force-repair <database_file> # Force repair without prompts
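#
#   Example (illustrative; the library database path below is the typical
#   location for a Linux package install and may differ on your system):
#     ./plex-database-repair.sh check \
#       "/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db"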
#
# Exit Codes:
#   0 - Success (database is healthy or successfully repaired)
#   1 - Database has issues but repair failed
#   2 - Critical error (cannot access database or repair tools)
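#
# Calling scripts (e.g. backup-plex.sh) can branch on the exit code; an
# illustrative pattern, not the actual integration code:
#   if ! ./plex-database-repair.sh check "$db"; then
#       ./plex-database-repair.sh repair "$db" || echo "Manual intervention needed"
#   fi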
#
################################################################################

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Configuration
PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"
BACKUP_ROOT="/mnt/share/media/backups/plex"
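# Note: the paths above assume a standard Linux package install of Plex Media Server
# and the backup location used by backup-plex.sh; adjust them if your setup differs.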

# Logging functions
log_message() {
    local message="$1"
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${CYAN}[${timestamp}]${NC} ${message}"
}

log_error() {
    local message="$1"
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}" >&2
}

log_success() {
    local message="$1"
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}"
}

log_warning() {
    local message="$1"
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}"
}

log_info() {
    local message="$1"
    local timestamp
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${BLUE}[${timestamp}] INFO:${NC} ${message}"
}

# Check if Plex SQLite binary exists and is executable
check_plex_sqlite() {
    if [[ ! -f "$PLEX_SQLITE" ]]; then
        log_error "Plex SQLite binary not found at: $PLEX_SQLITE"
        return 1
    fi

    if ! sudo chmod +x "$PLEX_SQLITE" 2>/dev/null; then
        log_warning "Could not make Plex SQLite executable, but will try to use it"
    fi

    return 0
}

# Enhanced WAL file management for repair operations
handle_wal_files_for_repair() {
    local db_file="$1"
    local operation="${2:-prepare}" # prepare, cleanup, or restore

    local db_dir
    db_dir=$(dirname "$db_file")
    local db_base
    db_base=$(basename "$db_file" .db)
    local wal_file="${db_dir}/${db_base}.db-wal"
    local shm_file="${db_dir}/${db_base}.db-shm"

    case "$operation" in
        "prepare")
            log_message "Preparing WAL files for repair of $(basename "$db_file")"

            # Force WAL checkpoint to consolidate all changes
            if [[ -f "$wal_file" ]]; then
                log_info "Found WAL file, performing checkpoint..."
                if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA wal_checkpoint(TRUNCATE);" 2>/dev/null; then
                    log_success "WAL checkpoint completed"
                else
                    log_warning "WAL checkpoint failed, but continuing"
                fi
            fi

            # Create backup copies of WAL/SHM files if they exist
            for file in "$wal_file" "$shm_file"; do
                if [[ -f "$file" ]]; then
                    local backup_file="${file}.repair-backup"
                    if sudo cp "$file" "$backup_file" 2>/dev/null; then
                        log_info "Backed up $(basename "$file")"
                    else
                        log_warning "Failed to backup $(basename "$file")"
                    fi
                fi
            done
            ;;

        "cleanup")
            log_message "Cleaning up WAL files after repair"

            # Remove any remaining WAL/SHM files to force clean state
            for file in "$wal_file" "$shm_file"; do
                if [[ -f "$file" ]]; then
                    if sudo rm -f "$file" 2>/dev/null; then
                        log_info "Removed $(basename "$file")"
                    else
                        log_warning "Failed to remove $(basename "$file")"
                    fi
                fi
            done

            # Force WAL mode back on for consistency
            if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA journal_mode=WAL;" 2>/dev/null | grep -q "wal"; then
                log_success "WAL mode restored for $(basename "$db_file")"
            else
                log_warning "Failed to restore WAL mode for $(basename "$db_file")"
            fi
            ;;

        "restore")
            log_message "Restoring WAL files after failed repair"

            # Restore WAL/SHM backup files if they exist
            for file in "$wal_file" "$shm_file"; do
                local backup_file="${file}.repair-backup"
                if [[ -f "$backup_file" ]]; then
                    if sudo mv "$backup_file" "$file" 2>/dev/null; then
                        log_info "Restored $(basename "$file")"
                    else
                        log_warning "Failed to restore $(basename "$file")"
                    fi
                fi
            done
            ;;
    esac
}

# Check database integrity using Plex SQLite
check_database_integrity() {
    local db_file="$1"
    local db_name
    db_name=$(basename "$db_file")

    log_message "Checking database integrity: $db_name"

    if ! check_plex_sqlite; then
        return 2
    fi

    # Check if WAL file exists and handle it
    local wal_file="${db_file}-wal"
    if [[ -f "$wal_file" ]]; then
        log_info "WAL file found for $db_name, performing checkpoint..."
        if sudo "$PLEX_SQLITE" "$db_file" "PRAGMA wal_checkpoint(FULL);" 2>/dev/null; then
            log_success "WAL checkpoint completed for $db_name"
        else
            log_warning "WAL checkpoint failed for $db_name, proceeding with integrity check"
        fi
    fi

    # Run integrity check
    local integrity_result
    integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1)
    local check_exit_code=$?

    if [[ $check_exit_code -ne 0 ]]; then
        log_error "Failed to run integrity check on $db_name: $integrity_result"
        return 2
    fi

    if echo "$integrity_result" | grep -q "^ok$"; then
        log_success "Database integrity check passed: $db_name"
        return 0
    else
        log_warning "Database integrity issues detected in $db_name:"
        echo "$integrity_result" | while read -r line; do
            log_warning "  $line"
        done
        return 1
    fi
}

# Strategy 1: Dump and restore approach with enhanced validation
attempt_dump_restore() {
    local working_copy="$1"
    local original_db="$2"
    local timestamp="$3"
    local dump_file="${original_db}.dump-${timestamp}.sql"
    local new_db="${original_db}.repaired-${timestamp}"

    log_message "Attempting repair via SQL dump/restore..."

    # Try to dump the database with error checking
    log_info "Creating database dump..."
    if sudo "$PLEX_SQLITE" "$working_copy" ".dump" 2>/dev/null | sudo tee "$dump_file" >/dev/null; then
        # Validate the dump file exists and has substantial content
        if [[ ! -f "$dump_file" ]]; then
            log_warning "Dump file was not created"
            return 1
        fi

        local dump_size
        dump_size=$(stat -c%s "$dump_file" 2>/dev/null || echo "0")
        if [[ "$dump_size" -lt 1024 ]]; then
            log_warning "Dump file is too small ($dump_size bytes) - likely incomplete"
            sudo rm -f "$dump_file"
            return 1
        fi

        # Check for essential database structures in dump
        if ! grep -q "CREATE TABLE" "$dump_file" 2>/dev/null; then
            log_warning "Dump file contains no CREATE TABLE statements - dump is incomplete"
            sudo rm -f "$dump_file"
            return 1
        fi

        # Check for critical Plex tables
        local critical_tables=("schema_migrations" "accounts" "library_sections")
        local missing_tables=()
        for table in "${critical_tables[@]}"; do
            if ! grep -q "CREATE TABLE.*$table" "$dump_file" 2>/dev/null; then
                missing_tables+=("$table")
            fi
        done

        if [[ ${#missing_tables[@]} -gt 0 ]]; then
            log_warning "Dump is missing critical tables: ${missing_tables[*]}"
            log_warning "This would result in an incomplete database - aborting dump/restore"
            sudo rm -f "$dump_file"
            return 1
        fi

        log_success "Database dumped successfully (${dump_size} bytes)"
        log_info "Dump contains all critical tables: ${critical_tables[*]}"

        # Create new database from dump
        log_info "Creating new database from validated dump..."
        if sudo cat "$dump_file" | sudo "$PLEX_SQLITE" "$new_db" 2>/dev/null; then
            # Verify the new database was created and has content
            if [[ ! -f "$new_db" ]]; then
                log_warning "New database file was not created"
                sudo rm -f "$dump_file"
                return 1
            fi

            local new_db_size
            new_db_size=$(stat -c%s "$new_db" 2>/dev/null || echo "0")
            if [[ "$new_db_size" -lt 1048576 ]]; then # Less than 1MB
                log_warning "New database is too small ($new_db_size bytes) - likely empty or incomplete"
                sudo rm -f "$new_db" "$dump_file"
                return 1
            fi

            # Verify critical tables exist in new database
            local table_count
            table_count=$(sudo "$PLEX_SQLITE" "$new_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null || echo "0")
            if [[ "$table_count" -lt 50 ]]; then # Plex should have way more than 50 tables
                log_warning "New database has too few tables ($table_count) - likely incomplete"
                sudo rm -f "$new_db" "$dump_file"
                return 1
            fi

            # Verify schema_migrations table specifically (this was the root cause)
            if ! sudo "$PLEX_SQLITE" "$new_db" "SELECT COUNT(*) FROM schema_migrations;" >/dev/null 2>&1; then
                log_warning "New database missing schema_migrations table - Plex will not start"
                sudo rm -f "$new_db" "$dump_file"
                return 1
            fi

            log_success "New database created from dump ($new_db_size bytes, $table_count tables)"

            # Verify the new database passes integrity check
            log_info "Performing integrity check on repaired database..."
            if sudo "$PLEX_SQLITE" "$new_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
                log_success "New database passes integrity check"

                # Replace original with repaired version
                log_info "Replacing original database with repaired version..."
                if sudo mv "$new_db" "$original_db"; then
                    # Force filesystem sync to prevent corruption
                    sync
                    # Fix ownership to plex user
                    sudo chown plex:plex "$original_db"
                    sudo rm -f "$dump_file"
                    return 0
                else
                    sudo rm -f "$dump_file"
                    return 1
                fi
            else
                log_warning "Repaired database failed integrity check"
                sudo rm -f "$new_db" "$dump_file"
                return 1
            fi
        else
            log_warning "Failed to create database from dump - SQL import failed"
            sudo rm -f "$dump_file"
            return 1
        fi
    else
        log_warning "Failed to dump corrupted database - dump command failed"
        # Clean up any potentially created but empty dump file
        sudo rm -f "$dump_file"
        return 1
    fi
}

# Strategy 2: Schema recreation with data recovery
attempt_schema_recreation() {
    local working_copy="$1"
    local original_db="$2"
    local timestamp="$3"
    local schema_file="${original_db}.schema-${timestamp}.sql"
    local new_db="${original_db}.rebuilt-${timestamp}"

    log_message "Attempting repair via schema recreation..."

    # Extract schema
    if sudo "$PLEX_SQLITE" "$working_copy" ".schema" 2>/dev/null | sudo tee "$schema_file" >/dev/null; then
        log_success "Schema extracted"

        # Create new database with schema
        if sudo cat "$schema_file" | sudo "$PLEX_SQLITE" "$new_db" 2>/dev/null; then
            log_success "New database created with schema"

            # Try to recover data table by table
            if recover_table_data "$working_copy" "$new_db"; then
                log_success "Data recovery completed"

                # Verify the rebuilt database
                if sudo "$PLEX_SQLITE" "$new_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
                    if sudo mv "$new_db" "$original_db"; then
                        sync
                        # Fix ownership to plex user
                        sudo chown plex:plex "$original_db"
                        sudo rm -f "$schema_file"
                        return 0
                    fi
                else
                    log_warning "Rebuilt database failed integrity check"
                fi
            fi
        fi

        sudo rm -f "$new_db" "$schema_file"
    fi

    return 1
}

# Strategy 3: Recovery from previous backup
attempt_backup_recovery() {
    local original_db="$1"
    local backup_dir="$2"
    local current_backup="$3"

    log_message "Attempting recovery from previous backup..."

    # Find the most recent backup that's not the current corrupted one
    local latest_backup
    if [[ -n "$current_backup" ]]; then
        # Exclude the current backup from consideration
        latest_backup=$(find "$backup_dir" -name "plex-backup-*.tar.gz" -type f ! -samefile "$current_backup" -printf '%T@ %p\n' 2>/dev/null | sort -nr | head -1 | cut -d' ' -f2-)
    else
        latest_backup=$(find "$backup_dir" -name "plex-backup-*.tar.gz" -type f -printf '%T@ %p\n' 2>/dev/null | sort -nr | head -1 | cut -d' ' -f2-)
    fi

    if [[ -n "$latest_backup" && -f "$latest_backup" ]]; then
        log_message "Found recent backup: $(basename "$latest_backup")"

        local temp_restore_dir="/tmp/plex-restore-$$"
        mkdir -p "$temp_restore_dir"

        # Extract the backup
        if tar -xzf "$latest_backup" -C "$temp_restore_dir" 2>/dev/null; then
            local restored_db
            restored_db="${temp_restore_dir}/$(basename "$original_db")"

            if [[ -f "$restored_db" ]]; then
                # Verify the restored database
                if sudo "$PLEX_SQLITE" "$restored_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
                    if sudo cp "$restored_db" "$original_db"; then
                        sync
                        # Fix ownership to plex user
                        sudo chown plex:plex "$original_db"
                        rm -rf "$temp_restore_dir"
                        return 0
                    fi
                else
                    log_warning "Backup database also corrupted"
                fi
            fi
        fi

        rm -rf "$temp_restore_dir"
    fi

    return 1
}

# Recovery helper for table data
recover_table_data() {
    local source_db="$1"
    local target_db="$2"

    # Get list of tables
    local tables
    tables=$(sudo "$PLEX_SQLITE" "$source_db" ".tables" 2>/dev/null)

    if [[ -z "$tables" ]]; then
        log_warning "No tables found in source database"
        return 1
    fi

    local recovered_count=0
    local total_tables=0

    for table in $tables; do
        ((total_tables++))

        # Try to copy data from each table: dump the table as INSERT statements
        # to a temp file, then replay them into the target database
        if sudo "$PLEX_SQLITE" "$source_db" ".mode insert $table" ".output /tmp/table_data_$$.sql" "SELECT * FROM $table;" ".output stdout" 2>/dev/null && \
            sudo cat "/tmp/table_data_$$.sql" | sudo "$PLEX_SQLITE" "$target_db" 2>/dev/null; then
            ((recovered_count++))
            sudo rm -f "/tmp/table_data_$$.sql" 2>/dev/null || true
        else
            log_warning "Failed to recover data from table: $table"
            sudo rm -f "/tmp/table_data_$$.sql" 2>/dev/null || true
        fi
    done

    log_message "Recovered $recovered_count/$total_tables tables"

    # Consider successful if we recovered at least 80% of tables
    if [[ "$total_tables" -eq 0 ]]; then
        log_warning "No tables found for recovery"
        return 1
    fi

    if (( recovered_count * 100 / total_tables >= 80 )); then
        return 0
    fi

    return 1
}

# Cleanup helper function
cleanup_repair_files() {
    local pre_repair_backup="$1"
    local working_copy="$2"

    if [[ -n "$pre_repair_backup" && -f "$pre_repair_backup" ]]; then
        sudo rm -f "$pre_repair_backup" 2>/dev/null || true
    fi

    if [[ -n "$working_copy" && -f "$working_copy" ]]; then
        sudo rm -f "$working_copy" 2>/dev/null || true
    fi
}

# Enhanced database repair with multiple recovery strategies
repair_database() {
    local db_file="$1"
    local force_repair="${2:-false}"
    local db_name
    db_name=$(basename "$db_file")
    local timestamp
    timestamp=$(date "+%Y-%m-%d_%H.%M.%S")

    if ! check_plex_sqlite; then
        return 2
    fi

    log_message "Attempting to repair corrupted database: $db_name"

    # Enhanced WAL file handling for repair
    handle_wal_files_for_repair "$db_file" "prepare"

    # Create multiple backup copies before attempting repair
    local pre_repair_backup="${db_file}.pre-repair-backup"
    local working_copy="${db_file}.working-${timestamp}"

    if ! sudo cp "$db_file" "$pre_repair_backup"; then
        log_error "Failed to create pre-repair backup"
        handle_wal_files_for_repair "$db_file" "restore"
        return 2
    fi
    # Force filesystem sync to prevent corruption
    sync

    if ! sudo cp "$db_file" "$working_copy"; then
        log_error "Failed to create working copy"
        handle_wal_files_for_repair "$db_file" "restore"
        return 2
    fi
    # Force filesystem sync to prevent corruption
    sync

    log_success "Created pre-repair backup: $(basename "$pre_repair_backup")"

    # Strategy 1: Try dump and restore approach
    log_message "Strategy 1: Attempting repair via dump and restore..."
    if attempt_dump_restore "$working_copy" "$db_file" "$timestamp"; then
        log_success "Database repaired using dump/restore method"
        handle_wal_files_for_repair "$db_file" "cleanup"
        cleanup_repair_files "$pre_repair_backup" "$working_copy"
        return 0
    fi

    # Strategy 2: Try schema recreation
    if attempt_schema_recreation "$working_copy" "$db_file" "$timestamp"; then
        log_success "Database repaired using schema recreation"
        handle_wal_files_for_repair "$db_file" "cleanup"
        cleanup_repair_files "$pre_repair_backup" "$working_copy"
        return 0
    fi

    # Strategy 3: Try recovery from previous backup
    if attempt_backup_recovery "$db_file" "$BACKUP_ROOT" "$pre_repair_backup"; then
        log_success "Database recovered from previous backup"
        handle_wal_files_for_repair "$db_file" "cleanup"
        cleanup_repair_files "$pre_repair_backup" "$working_copy"
        return 0
    fi

    # All strategies failed - restore original and flag for manual intervention
    log_error "Database repair failed. Restoring original..."
    if sudo cp "$pre_repair_backup" "$db_file"; then
        # Force filesystem sync to prevent corruption
        sync
        log_success "Original database restored"
        handle_wal_files_for_repair "$db_file" "restore"
    else
        log_error "Failed to restore original database!"
        handle_wal_files_for_repair "$db_file" "restore"
        cleanup_repair_files "$pre_repair_backup" "$working_copy"
        return 2
    fi

    log_error "Database repair failed for $db_name"
    log_warning "Will backup corrupted database - manual intervention may be needed"
    cleanup_repair_files "$pre_repair_backup" "$working_copy"
    return 1
}

# Main function
main() {
    local action="$1"
    local db_file="$2"
    local force_repair=false

    # Parse arguments
    case "$action" in
        "check")
            if [[ -z "$db_file" ]]; then
                log_error "Usage: $0 check <database_file>"
                exit 2
            fi

            if [[ ! -f "$db_file" ]]; then
                log_error "Database file not found: $db_file"
                exit 2
            fi

            check_database_integrity "$db_file"
            exit $?
            ;;
        "repair")
            if [[ -z "$db_file" ]]; then
                log_error "Usage: $0 repair <database_file>"
                exit 2
            fi

            if [[ ! -f "$db_file" ]]; then
                log_error "Database file not found: $db_file"
                exit 2
            fi

            repair_database "$db_file" "$force_repair"
            exit $?
            ;;
        "force-repair")
            force_repair=true
            if [[ -z "$db_file" ]]; then
                log_error "Usage: $0 force-repair <database_file>"
                exit 2
            fi

            if [[ ! -f "$db_file" ]]; then
                log_error "Database file not found: $db_file"
                exit 2
            fi

            repair_database "$db_file" "$force_repair"
            exit $?
            ;;
        *)
            echo "Usage: $0 {check|repair|force-repair} <database_file>"
            echo ""
            echo "Commands:"
            echo "  check <db_file>         Check database integrity only"
            echo "  repair <db_file>        Attempt to repair corrupted database"
            echo "  force-repair <db_file>  Force repair without prompts"
            echo ""
            echo "Exit codes:"
            echo "  0 - Success (database is healthy or successfully repaired)"
            echo "  1 - Database has issues but repair failed"
            echo "  2 - Critical error (cannot access database or repair tools)"
            exit 2
            ;;
    esac
}

# Run main function if script is executed directly
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    main "$@"
fi