Compare commits

...

31 Commits

Author SHA1 Message Date
Peter Wood
88d1bf6878 fix(backup-log-monitor): improve log status display and filter out empty journalctl messages 2026-03-26 10:12:31 -04:00
Peter Wood
56d35fa3ee added crontab entry for karakeep 2026-03-26 14:10:38 +00:00
Peter Wood
715adb2bd0 fix(restore-karakeep): streamline docker compose command syntax for stopping and starting containers 2026-03-26 11:26:45 +00:00
Peter Wood
22c29a1610 feat(backup-golinks): add backup script for golinks export with NAS support and cleanup logic 2026-03-26 11:11:32 +00:00
Peter Wood
f7a9f92493 fix(restore-karakeep): gzip -t verification, fix compose pipe exit codes, dotfile-safe clear, add --compose-dir flag 2026-03-25 21:33:41 -04:00
Peter Wood
bd1d142800 fix(backup-env-files): remove duplicate SCRIPT_DIR, add trap - ERR in cleanup to prevent re-entry 2026-03-25 21:25:20 -04:00
Peter Wood
74a2453edc fix(backup-gitea): timestamped log, safe .env loading, fix $? anti-pattern, add archive verification and NAS log copy 2026-03-25 21:23:56 -04:00
Peter Wood
5f7c0ad866 fix(backup-docker): timestamped log, add archive gzip verification, add 30-day rotation 2026-03-25 21:22:05 -04:00
Peter Wood
58d1762815 fix(backup-log-monitor): add karakeep tag, fix health check script and dir paths 2026-03-25 21:19:44 -04:00
Peter Wood
2065c6f6f3 fix(backup-calibre): use SCRIPT_DIR log path, fix rsync exit code capture, add set -e 2026-03-25 21:15:43 -04:00
Peter Wood
d2e6d9ff05 feat: Add backup script for Karakeep services with logging and NAS support 2026-03-25 21:05:19 -04:00
Peter Wood
33d64041ff feat: Update color code definitions for consistency across scripts 2026-03-12 11:18:36 -04:00
Peter Wood
70ec810f58 feat: Add initial implementation of Plex Management TUI with backend support and styling 2026-03-12 11:18:25 -04:00
Peter Wood
9196879b6c feat: Integrate Plex API for library scanning, metadata refresh, and media analysis 2026-03-07 11:37:49 -05:00
Peter Wood
dc8d35f593 feat: Enhance Plex library management with API integration and improved scanning functions 2026-03-07 11:25:20 -05:00
Peter Wood
c3af84b3e6 feat: Add DBRepair.sh to .gitignore to prevent tracking of compiled binaries 2026-03-07 11:25:13 -05:00
Peter Wood
edae8513d1 feat: Improve help command formatting for better readability 2026-03-07 11:01:20 -05:00
Peter Wood
8e7e22a1a3 feat: Refactor help command output for improved readability and formatting 2026-03-07 10:59:00 -05:00
Peter Wood
2bae9bc6ce feat: Add DBRepair installation and management features to Plex scripts 2026-03-07 10:56:39 -05:00
Peter Wood
9bb99aecbf feat: Enhance database integrity checks and repair functionality across scripts 2026-03-07 10:42:41 -05:00
Peter Wood
ddaa641668 feat: Improve database integrity check and startup handling in plex.sh 2026-03-07 10:29:14 -05:00
Peter Wood
b47f58fad7 removed summary markdown 2026-03-07 10:29:07 -05:00
Peter Wood
4ce77211b5 feat: Add section separator in list output when showing updates 2026-01-15 06:40:31 -05:00
Peter Wood
e3b89032d4 Merge branch 'main' of github.com:acedanger/shell 2026-01-15 06:36:04 -05:00
Peter Wood
06ee3dd672 feat: Enhance list command to support filtering by update availability 2026-01-15 06:36:02 -05:00
Peter Wood
07d9cc825f Merge branch 'main' of https://github.com/acedanger/shell 2025-12-28 17:17:32 -08:00
Peter Wood
6760b74b97 feat: Ensure console cursor visibility after script execution 2025-12-29 01:10:12 +00:00
Peter Wood
5b31d616f6 feat: Enhance backup script with upload-only mode and improved error logging 2025-12-16 17:25:15 -05:00
Peter Wood
33b1594bdf added pangolin update shell script 2025-12-13 19:48:57 -08:00
Peter Wood
ebe9644701 feat: Add repair script for Jellyfin database with backup and integrity check functionality 2025-12-13 18:50:02 -05:00
Peter Wood
bb704385fc feat: Add log_status function to improve logging in backup script 2025-12-13 18:49:50 -05:00
32 changed files with 4280 additions and 587 deletions

.gitignore vendored (1 line changed)

@@ -43,3 +43,4 @@ dotfiles/my-aliases.zsh
# Compiled binaries
tui/tui
plex/DBRepair.sh


@@ -3,10 +3,17 @@
# Calibre Library Backup Script
# This script backs up the Calibre Library to a network share
set -e
# Configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
SOURCE_DIR="/home/acedanger/Calibre Library/"
DEST_DIR="/mnt/share/media/books/"
LOG_FILE="/var/log/calibre-backup.log"
LOG_DIR="$SCRIPT_DIR/logs"
LOG_FILE="$LOG_DIR/calibre-backup.log"
# Ensure log directory exists
mkdir -p "$LOG_DIR"
# Function to log messages with timestamp
log_message() {
@@ -32,10 +39,17 @@ log_message "Source: $SOURCE_DIR"
log_message "Destination: $DEST_DIR"
# Perform the backup using rsync
if rsync -rtvp --delete --exclude="*.tmp" "$SOURCE_DIR" "$DEST_DIR" 2>&1 | tee -a "$LOG_FILE"; then
# Use a temp file to capture output while preserving rsync's exit code
RSYNC_TMP=$(mktemp)
if rsync -rtvp --delete --exclude="*.tmp" "$SOURCE_DIR" "$DEST_DIR" > "$RSYNC_TMP" 2>&1; then
cat "$RSYNC_TMP" | tee -a "$LOG_FILE"
log_message "Backup completed successfully"
rm -f "$RSYNC_TMP"
exit 0
else
log_message "ERROR: Backup failed with exit code $?"
exit 1
RSYNC_EXIT=$?
cat "$RSYNC_TMP" | tee -a "$LOG_FILE"
log_message "ERROR: Backup failed with exit code $RSYNC_EXIT"
rm -f "$RSYNC_TMP"
exit $RSYNC_EXIT
fi
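The temp-file approach above exists because in `cmd | tee -a log`, `$?` reports tee's exit status rather than the command's. A minimal standalone sketch of the same pattern, using placeholder paths and assuming rsync is installed:

```bash
#!/usr/bin/env bash
# Sketch only: preserve rsync's exit code while still logging its output.
set -e

SRC="/tmp/example-src/"    # placeholder source
DEST="/tmp/example-dest/"  # placeholder destination
LOG="/tmp/example-backup.log"
mkdir -p "$SRC" "$DEST"

RSYNC_TMP=$(mktemp)
if rsync -rtvp --delete "$SRC" "$DEST" > "$RSYNC_TMP" 2>&1; then
    tee -a "$LOG" < "$RSYNC_TMP"   # replay output to console and log
    rm -f "$RSYNC_TMP"
    echo "backup ok"
else
    RSYNC_EXIT=$?                  # rsync's real status, captured immediately
    tee -a "$LOG" < "$RSYNC_TMP"
    rm -f "$RSYNC_TMP"
    echo "backup failed with exit code $RSYNC_EXIT" >&2
    exit "$RSYNC_EXIT"
fi
```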


@@ -26,17 +26,15 @@ BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
BACKUP_TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_ROOT="/home/acedanger/backup/docker-data"
LOG_FILE="$SCRIPT_DIR/logs/docker-backup.log"
LOG_FILE="$SCRIPT_DIR/logs/docker-backup-${BACKUP_TIMESTAMP}.log"
NOTIFICATION_URL="https://notify.peterwood.rocks/lab"
# Container definitions: container_name:volume_path:description
declare -A CONTAINERS=(
["vaultwarden"]="/var/lib/docker/volumes/vaultwarden_data/_data:Password manager data"
["uptime-kuma"]="/var/lib/docker/volumes/uptime-kuma/_data:Uptime monitoring data"
# ["paperless-ng"]="/var/lib/docker/volumes/paperless-ng_data/_data:Document management data"
# ["paperless-media"]="/var/lib/docker/volumes/paperless-ng_media/_data:Document media files"
# ["paperless-pgdata"]="/var/lib/docker/volumes/paperless-ng_pgdata/_data:PostgreSQL database"
)
# Ensure directories exist
@@ -166,9 +164,18 @@ backup_container_volume() {
fi
if tar -czf "$backup_file" -C "$(dirname "$volume_path")" "$(basename "$volume_path")" 2>/dev/null; then
# Verify archive integrity
if ! gzip -t "$backup_file" 2>/dev/null; then
log "Error: Archive verification failed for $(basename "$backup_file")"
rm -f "$backup_file"
if [ "$was_running" = true ]; then
start_container "$container" || true
fi
return 1
fi
local backup_size
backup_size=$(du -h "$backup_file" | cut -f1)
log "Backup completed successfully: $(basename "$backup_file") ($backup_size)"
log "Backup completed and verified: $(basename "$backup_file") ($backup_size)"
# Track file completion in metrics
if [[ "$METRICS_ENABLED" == "true" ]]; then
@@ -322,6 +329,10 @@ main() {
send_notification "failed" "Docker backup completed with errors: $failed_backups failed, $successful_backups succeeded" "${failed_containers[*]}"
fi
# Rotate old archives (keep last 30 days)
log "Cleaning up old backup archives..."
find "$BACKUP_ROOT" -name "*-data-bk-*.tar.gz" -mtime +30 -delete 2>/dev/null || true
# Finalize metrics
if [[ "$METRICS_ENABLED" == "true" ]]; then
cleanup
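For reference, a condensed sketch of the create, verify, rotate sequence added above, with throwaway paths instead of the real volume locations:

```bash
#!/usr/bin/env bash
# Sketch only: gzip -t re-reads the whole archive and checks its CRC, catching
# truncated or corrupted .tar.gz files before they are trusted as backups.
set -e

SOURCE_DIR="/tmp/example-data"          # placeholder data directory
BACKUP_FILE="/tmp/example-data.tar.gz"  # placeholder archive
mkdir -p "$SOURCE_DIR"

tar -czf "$BACKUP_FILE" -C "$(dirname "$SOURCE_DIR")" "$(basename "$SOURCE_DIR")"

if ! gzip -t "$BACKUP_FILE" 2>/dev/null; then
    echo "Error: archive verification failed for $(basename "$BACKUP_FILE")" >&2
    rm -f "$BACKUP_FILE"
    exit 1
fi
echo "Backup completed and verified: $(basename "$BACKUP_FILE") ($(du -h "$BACKUP_FILE" | cut -f1))"

# 30-day rotation, as in the script above
find "$(dirname "$BACKUP_FILE")" -name "*.tar.gz" -mtime +30 -delete 2>/dev/null || true
```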


@@ -26,7 +26,6 @@ BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DOCKER_DIR="$HOME/docker"
BACKUP_REPO_NAME="docker-env-backup"
BACKUP_DIR="$HOME/.env-backup"
@@ -241,6 +240,7 @@ EOF
# Cleanup function for metrics finalization
cleanup() {
trap - ERR
if [[ "$METRICS_ENABLED" == "true" ]]; then
if [[ -n "$1" && "$1" == "error" ]]; then
metrics_backup_complete "failed" "Backup failed during execution"
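The `trap - ERR` line above guards against recursion: without it, any command that fails inside cleanup() would fire the ERR trap again and re-enter cleanup. A small illustrative sketch of that guard:

```bash
#!/usr/bin/env bash
# Sketch only: -e exits on error, -E lets functions inherit the ERR trap.
set -eE

cleanup() {
    trap - ERR        # disarm first so a failure below cannot re-enter cleanup
    echo "running cleanup (reason: ${1:-unknown})"
    false || true     # simulated best-effort cleanup step that may fail
}

trap 'cleanup error' ERR

echo "doing work..."
false                 # simulated failure: fires the ERR trap, then the script exits
echo "never reached"
```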


@@ -21,9 +21,10 @@ COMPOSE_DIR="/home/acedanger/docker/gitea"
BACKUP_DIR="/home/acedanger/backups/gitea"
NAS_DIR="/mnt/share/media/backups/gitea"
NAS_LOG_DIR="/mnt/share/media/backups/logs"
COMPOSE_FILE="$COMPOSE_DIR/docker-compose.yml"
LOG_FILE="$SCRIPT_DIR/logs/gitea-backup.log"
DATE=$(date +%Y%m%d_%H%M%S)
LOG_FILE="$SCRIPT_DIR/logs/gitea-backup-${DATE}.log"
# Ensure directories exist
mkdir -p "$(dirname "$LOG_FILE")"
@@ -31,7 +32,10 @@ mkdir -p "$BACKUP_DIR"
# Load .env variables from the COMPOSE_DIR to ensure DB credentials match
if [ -f "$COMPOSE_DIR/.env" ]; then
export $(grep -v '^#' "$COMPOSE_DIR/.env" | xargs)
# shellcheck source=/dev/null
set -a
source "$COMPOSE_DIR/.env"
set +a
fi
# Logging function (Fixed to interpret colors correctly)
@@ -143,6 +147,14 @@ perform_backup() {
# Tar the temp folder into one final file
tar -czf "$BACKUP_DIR/$FINAL_ARCHIVE_NAME" -C "$TEMP_BACKUP_PATH" .
# Verify archive integrity
if ! gzip -t "$BACKUP_DIR/$FINAL_ARCHIVE_NAME" 2>/dev/null; then
log "${RED}Error: Archive verification failed for $FINAL_ARCHIVE_NAME${NC}"
rm -f "$BACKUP_DIR/$FINAL_ARCHIVE_NAME"
rm -rf "$TEMP_BACKUP_PATH"
exit 1
fi
# Remove temp folder
rm -rf "$TEMP_BACKUP_PATH"
@@ -152,8 +164,7 @@ perform_backup() {
if [[ "$SKIP_NAS" != "true" ]]; then
if [ -d "$NAS_DIR" ]; then
log "Copying to NAS ($NAS_DIR)..."
cp "$BACKUP_DIR/$FINAL_ARCHIVE_NAME" "$NAS_DIR/"
if [ $? -eq 0 ]; then
if cp "$BACKUP_DIR/$FINAL_ARCHIVE_NAME" "$NAS_DIR/"; then
log "${GREEN}NAS Copy Successful.${NC}"
else
log "${RED}NAS Copy Failed. Check permissions on $NAS_DIR${NC}"
@@ -170,6 +181,14 @@ perform_backup() {
log "Cleanup of old local backups complete."
}
# Copy log file to NAS logs directory
copy_logs_to_nas() {
if [[ -f "$LOG_FILE" && -d "$NAS_LOG_DIR" ]]; then
cp "$LOG_FILE" "$NAS_LOG_DIR/" 2>/dev/null || \
log "${YELLOW}Warning: Could not copy log to NAS logs directory${NC}"
fi
}
# Function to generate the restore script
create_restore_script() {
local TARGET_DIR=$1
@@ -211,6 +230,7 @@ check_dependencies
# Parse Arguments
if [ $# -eq 0 ]; then
perform_backup "false"
copy_logs_to_nas
exit 0
fi
@@ -218,7 +238,7 @@ while [[ "$#" -gt 0 ]]; do
case $1 in
-h|--help) usage; exit 0 ;;
-l|--list) list_backups; exit 0 ;;
-n|--no-nas) perform_backup "true"; exit 0 ;;
-n|--no-nas) perform_backup "true"; copy_logs_to_nas; exit 0 ;;
*) echo "Unknown parameter: $1"; usage; exit 1 ;;
esac
shift
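The `set -a` / `source` change above avoids the word-splitting problems that `export $(grep -v '^#' .env | xargs)` has with quoted values. A sketch against a hypothetical .env file:

```bash
#!/usr/bin/env bash
# Sketch only: `set -a` auto-exports every variable assigned while it is active,
# and `source` lets the shell parse each assignment, so quoted values survive.
set -e

ENV_FILE=$(mktemp)
cat > "$ENV_FILE" <<'EOF'
DB_USER=gitea
DB_PASS="s3cret value with spaces"
EOF

set -a
# shellcheck source=/dev/null
source "$ENV_FILE"
set +a

echo "DB_USER=$DB_USER"
echo "DB_PASS=$DB_PASS"   # intact, including spaces
rm -f "$ENV_FILE"
```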

backup-golinks.sh Executable file (141 lines added)

@@ -0,0 +1,141 @@
#!/bin/bash
# backup-golinks.sh - Backup golinks export
# Enhanced for NAS support and consistency
set -e
# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# ==========================================
# 1. CONFIGURATION
# ==========================================
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BACKUP_DIR="/home/acedanger/backups/golinks"
NAS_DIR="/mnt/share/media/backups/golinks"
GOLINKS_URL="http://go/.export"
LOG_FILE="$SCRIPT_DIR/logs/golinks-backup.log"
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_FILENAME="golinks_backup_${DATE}.json"
# Ensure directories exist
mkdir -p "$(dirname "$LOG_FILE")"
mkdir -p "$BACKUP_DIR"
# Logging function
log() {
# Print to console with colors (interpreting escapes with -e)
echo -e "$(date --iso-8601=seconds) - $1"
# Strip colors for the log file to keep it clean
echo -e "$(date --iso-8601=seconds) - $1" | sed 's/\x1b\[[0-9;]*m//g' >> "$LOG_FILE"
}
# Display usage information
usage() {
echo "Usage: $0 [OPTIONS]"
echo ""
echo "Backup golinks export"
echo "Options:"
echo " -h, --help Show this help message"
echo " -l, --list List available local backups"
echo " -n, --no-nas Skip copying to NAS (Local only)"
echo ""
}
# Check dependencies
check_dependencies() {
if ! command -v curl &> /dev/null; then
log "${RED}Error: curl is not installed.${NC}"
exit 1
fi
}
# List available backups
list_backups() {
echo -e "${BLUE}=== Available Golinks Backups (Local) ===${NC}"
ls -lh "$BACKUP_DIR"/* 2>/dev/null || echo "No backups found."
}
# ==========================================
# 2. BACKUP LOGIC
# ==========================================
perform_backup() {
local SKIP_NAS=$1
log "Starting backup process..."
LOCAL_FILE="$BACKUP_DIR/$BACKUP_FILENAME"
# Download golinks export
log "${YELLOW}Downloading golinks export to: $LOCAL_FILE${NC}"
if curl -L -f "$GOLINKS_URL" -o "$LOCAL_FILE"; then
if [ ! -s "$LOCAL_FILE" ]; then
log "${RED}Error: Downloaded file is empty${NC}"
rm -f "$LOCAL_FILE"
exit 1
fi
log "${GREEN}Download completed successfully${NC}"
FILE_SIZE=$(du -h "$LOCAL_FILE" | cut -f1)
log "File size: $FILE_SIZE"
else
log "${RED}Error: Failed to download golinks export${NC}"
# Clean up empty file if it exists
[ -f "$LOCAL_FILE" ] && rm -f "$LOCAL_FILE"
exit 1
fi
# Copy to NAS if not skipped
if [ "$SKIP_NAS" = "false" ]; then
if [ ! -d "$NAS_DIR" ]; then
log "${YELLOW}NAS directory not found, creating: $NAS_DIR${NC}"
mkdir -p "$NAS_DIR" || log "${RED}Error: Failed to create NAS directory: $NAS_DIR${NC}"
fi
if [ -d "$NAS_DIR" ] && [ -w "$NAS_DIR" ]; then
log "${YELLOW}Copying backup to NAS: $NAS_DIR${NC}"
if cp "$LOCAL_FILE" "$NAS_DIR/"; then
log "${GREEN}Successfully copied to NAS${NC}"
else
log "${RED}Error: Failed to copy to NAS${NC}"
# Don't exit here, local backup is still good
fi
else
log "${YELLOW}Warning: NAS directory not accessible or writable: $NAS_DIR${NC}"
fi
else
log "${YELLOW}Skipping NAS copy as requested.${NC}"
fi
# Cleanup old local backups (Keep 30 days)
log "Cleaning up local backups older than 30 days..."
find "$BACKUP_DIR" -type f -name "golinks_backup_*" -mtime +30 -delete
log "${GREEN}Backup process finished.${NC}"
}
# ==========================================
# 3. EXECUTION FLOW
# ==========================================
check_dependencies
# Parse Arguments
if [ $# -eq 0 ]; then
perform_backup "false"
exit 0
fi
while [[ "$#" -gt 0 ]]; do
case $1 in
-h|--help) usage; exit 0 ;;
-l|--list) list_backups; exit 0 ;;
-n|--no-nas) perform_backup "true"; exit 0 ;;
*) echo "Unknown parameter: $1"; usage; exit 1 ;;
esac
shift
done

backup-karakeep.sh Executable file (868 lines added)

@@ -0,0 +1,868 @@
#!/bin/bash
set -e
# Load the unified backup metrics library
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
LIB_DIR="$SCRIPT_DIR/lib"
if [[ -f "$LIB_DIR/unified-backup-metrics.sh" ]]; then
# shellcheck source=lib/unified-backup-metrics.sh
source "$LIB_DIR/unified-backup-metrics.sh"
METRICS_ENABLED=true
else
echo "Warning: Unified backup metrics library not found at $LIB_DIR/unified-backup-metrics.sh"
METRICS_ENABLED=false
fi
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Configuration
MAX_BACKUP_AGE_DAYS=30
MAX_BACKUPS_TO_KEEP=10
COMPOSE_DIR="/home/acedanger/docker/karakeep"
BACKUP_ROOT="/mnt/share/media/backups/karakeep"
LOCAL_BACKUP_DIR="/home/acedanger/backups/karakeep"
LOG_ROOT="${SCRIPT_DIR}/logs"
JSON_LOG_FILE="${SCRIPT_DIR}/logs/karakeep-backup.json"
PERFORMANCE_LOG_FILE="${SCRIPT_DIR}/logs/karakeep-backup-performance.json"
NAS_LOG_DIR="/mnt/share/media/backups/logs"
# Volume configuration: volume_name -> mount_path inside container
declare -A KARAKEEP_VOLUMES=(
["hoarder_data"]="/data"
["hoarder_meilisearch"]="/meili_data"
)
# Script options
VERIFY_BACKUPS=true
PERFORMANCE_MONITORING=true
WEBHOOK_URL="https://notify.peterwood.rocks/lab"
INTERACTIVE_MODE=false
DRY_RUN=false
STOP_CONTAINERS=true # Stop containers before backup for consistency
SKIP_NAS=false
# show help function
show_help() {
cat << EOF
Karakeep Services Backup Script
Usage: $0 [OPTIONS]
OPTIONS:
--dry-run Show what would be backed up without actually doing it
--no-verify Skip backup verification
--no-stop Do hot backup without stopping containers (less safe)
--no-nas Skip copying to NAS, keep local backups only
--interactive Ask for confirmation before each backup
--webhook URL Custom webhook URL for notifications
-h, --help Show this help message
EXAMPLES:
$0 # Run full backup with container stop/start
$0 --dry-run # Preview what would be backed up
$0 --no-stop # Hot backup without stopping containers
$0 --no-nas # Local backup only (skip NAS copy)
$0 --no-verify # Skip verification for faster backup
VOLUMES BACKED UP:
- hoarder_data (Karakeep app data: bookmarks, assets, database)
- hoarder_meilisearch (Meilisearch search index)
COMPOSE DIRECTORY:
$COMPOSE_DIR
EOF
}
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
--dry-run)
DRY_RUN=true
shift
;;
--no-verify)
VERIFY_BACKUPS=false
shift
;;
--no-stop)
STOP_CONTAINERS=false
shift
;;
--no-nas)
SKIP_NAS=true
shift
;;
--interactive)
INTERACTIVE_MODE=true
shift
;;
--webhook)
WEBHOOK_URL="$2"
shift 2
;;
-h|--help)
show_help
exit 0
;;
*)
echo "Unknown option: $1"
show_help
exit 1
;;
esac
done
# Timestamp for this backup run
BACKUP_TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_DEST="${LOCAL_BACKUP_DIR}/${BACKUP_TIMESTAMP}"
# Create necessary directories
mkdir -p "${LOG_ROOT}"
mkdir -p "${LOCAL_BACKUP_DIR}"
# Log files
LOG_FILE="${LOG_ROOT}/karakeep-backup-${BACKUP_TIMESTAMP}.log"
MARKDOWN_LOG="${LOG_ROOT}/karakeep-backup-${BACKUP_TIMESTAMP}.md"
# Logging functions
log_message() {
local message="$1"
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${CYAN}[${timestamp}]${NC} ${message}"
echo "[${timestamp}] $message" >> "${LOG_FILE}" 2>/dev/null || true
}
log_error() {
local message="$1"
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}" >&2
echo "[${timestamp}] ERROR: $message" >> "${LOG_FILE}" 2>/dev/null || true
}
log_success() {
local message="$1"
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}"
echo "[${timestamp}] SUCCESS: $message" >> "${LOG_FILE}" 2>/dev/null || true
}
log_warning() {
local message="$1"
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}"
echo "[${timestamp}] WARNING: $message" >> "${LOG_FILE}" 2>/dev/null || true
}
log_info() {
local message="$1"
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${BLUE}[${timestamp}] INFO:${NC} ${message}"
echo "[${timestamp}] INFO: $message" >> "${LOG_FILE}" 2>/dev/null || true
}
# Performance tracking
track_performance() {
if [ "$PERFORMANCE_MONITORING" != true ]; then
return 0
fi
local operation="$1"
local start_time="$2"
local end_time="${3:-$(date +%s)}"
local duration=$((end_time - start_time))
if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then
echo "[]" > "$PERFORMANCE_LOG_FILE"
fi
if command -v jq > /dev/null 2>&1; then
local entry
entry=$(jq -n \
--arg timestamp "$(date -Iseconds)" \
--arg operation "$operation" \
--arg duration "$duration" \
--arg hostname "$(hostname)" \
'{
timestamp: $timestamp,
operation: $operation,
duration: ($duration | tonumber),
hostname: $hostname
}')
local lock_file="${PERFORMANCE_LOG_FILE}.lock"
local max_wait=10
local wait_count=0
while [ $wait_count -lt $max_wait ]; do
if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
break
fi
sleep 0.1
((wait_count++))
done
if [ $wait_count -lt $max_wait ]; then
if jq --argjson entry "$entry" '. += [$entry]' "$PERFORMANCE_LOG_FILE" > "${PERFORMANCE_LOG_FILE}.tmp" 2>/dev/null; then
mv "${PERFORMANCE_LOG_FILE}.tmp" "$PERFORMANCE_LOG_FILE"
else
rm -f "${PERFORMANCE_LOG_FILE}.tmp"
fi
rm -f "$lock_file"
fi
fi
log_info "Performance: $operation completed in ${duration}s"
}
# Initialize JSON log file
initialize_json_log() {
if [ ! -f "${JSON_LOG_FILE}" ] || ! jq empty "${JSON_LOG_FILE}" 2>/dev/null; then
echo "{}" > "${JSON_LOG_FILE}"
log_message "Initialized JSON log file"
fi
}
# Log backup details with markdown formatting
log_file_details() {
local volume="$1"
local dest="$2"
local status="$3"
local size=""
local checksum=""
if [ "$status" == "SUCCESS" ] && [ -e "$dest" ]; then
size=$(du -sh "$dest" 2>/dev/null | cut -f1 || echo "Unknown")
if [ "$VERIFY_BACKUPS" == true ]; then
checksum=$(md5sum "$dest" 2>/dev/null | cut -d' ' -f1 || echo "N/A")
fi
else
size="N/A"
checksum="N/A"
fi
local markdown_lock="${MARKDOWN_LOG}.lock"
local max_wait=30
local wait_count=0
while [ $wait_count -lt $max_wait ]; do
if (set -C; echo $$ > "$markdown_lock") 2>/dev/null; then
break
fi
sleep 0.1
((wait_count++))
done
if [ $wait_count -lt $max_wait ]; then
{
echo "## Volume: $volume"
echo "- **Status**: $status"
echo "- **Destination**: $dest"
echo "- **Size**: $size"
echo "- **Checksum**: $checksum"
echo "- **Timestamp**: $(date '+%Y-%m-%d %H:%M:%S')"
echo ""
} >> "$MARKDOWN_LOG"
rm -f "$markdown_lock"
else
log_warning "Could not acquire markdown log lock for $volume"
fi
if command -v jq > /dev/null 2>&1; then
update_backup_log "$volume" "$dest" "$status" "$size" "$checksum"
fi
}
# Update backup log in JSON format
update_backup_log() {
local volume="$1"
local dest="$2"
local status="$3"
local size="$4"
local checksum="$5"
local timestamp
timestamp=$(date -Iseconds)
if ! command -v jq > /dev/null 2>&1; then
return 0
fi
local lock_file="${JSON_LOG_FILE}.lock"
local max_wait=30
local wait_count=0
while [ $wait_count -lt $max_wait ]; do
if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
break
fi
sleep 0.1
((wait_count++))
done
if [ $wait_count -ge $max_wait ]; then
log_warning "Could not acquire lock for JSON log update"
return 1
fi
local entry
entry=$(jq -n \
--arg volume "$volume" \
--arg dest "$dest" \
--arg status "$status" \
--arg size "$size" \
--arg checksum "$checksum" \
--arg timestamp "$timestamp" \
'{
volume: $volume,
destination: $dest,
status: $status,
size: $size,
checksum: $checksum,
timestamp: $timestamp
}')
if jq --argjson entry "$entry" --arg volume "$volume" \
'.[$volume] = $entry' "$JSON_LOG_FILE" > "${JSON_LOG_FILE}.tmp" 2>/dev/null; then
mv "${JSON_LOG_FILE}.tmp" "$JSON_LOG_FILE"
else
rm -f "${JSON_LOG_FILE}.tmp"
fi
rm -f "$lock_file"
}
# Check if NAS mount is accessible
check_nas_mount() {
local mount_point="/mnt/share/media"
if ! mountpoint -q "$mount_point"; then
log_warning "NAS not mounted at $mount_point - backups will be local only"
return 1
fi
if [ ! -w "$(dirname "$BACKUP_ROOT")" ]; then
log_warning "No write access to NAS backup path: $BACKUP_ROOT"
return 1
fi
log_success "NAS mount check passed: $mount_point is accessible"
return 0
}
# Verify backup archive integrity
verify_backup() {
local volume="$1"
local archive="$2"
if [ "$VERIFY_BACKUPS" != true ]; then
return 0
fi
log_info "Verifying backup archive: $archive"
if [ ! -f "$archive" ]; then
log_error "Backup archive not found: $archive"
return 1
fi
local file_size
file_size=$(stat -c%s "$archive" 2>/dev/null || echo "0")
if [ "$file_size" -eq 0 ]; then
log_error "Backup archive is empty: $archive"
return 1
fi
# Verify gzip container integrity (reads entire file, checks CRC).
# gzip -t is more reliable than tar -tzf, which can exit non-zero on
# tar warnings (e.g. special files) even when the archive is valid.
if ! gzip -t "$archive" 2>/dev/null; then
log_error "Backup archive failed integrity check: $archive"
return 1
fi
log_success "Backup verification passed for $volume (${file_size} bytes, gzip integrity OK)"
return 0
}
# Check disk space at backup destination
check_disk_space() {
local destination="$1"
local required_space_mb="${2:-500}"
local available_space_kb
available_space_kb=$(df "$(dirname "$destination")" 2>/dev/null | awk 'NR==2 {print $4}' || echo "0")
local available_space_mb=$((available_space_kb / 1024))
if [ "$available_space_mb" -lt "$required_space_mb" ]; then
log_error "Insufficient disk space at $(dirname "$destination"). Available: ${available_space_mb}MB, Required: ${required_space_mb}MB"
return 1
fi
log_info "Disk space check passed at $(dirname "$destination"). Available: ${available_space_mb}MB"
return 0
}
# Check if Docker volume exists
check_volume_exists() {
local volume_name="$1"
if ! docker volume inspect "$volume_name" > /dev/null 2>&1; then
log_error "Docker volume '$volume_name' not found"
return 1
fi
return 0
}
# Backup a single named Docker volume to a tar.gz archive
backup_volume() {
local volume_name="$1"
local mount_path="${KARAKEEP_VOLUMES[$volume_name]}"
local archive="${BACKUP_DEST}/${volume_name}.tar.gz"
local backup_start_time
backup_start_time=$(date +%s)
log_message "Starting backup for volume: $volume_name (${mount_path})"
if [ "$DRY_RUN" == true ]; then
log_info "DRY RUN: Would backup volume $volume_name -> $archive"
log_file_details "$volume_name" "$archive" "DRY RUN"
return 0
fi
if [ "$INTERACTIVE_MODE" == true ]; then
echo -n "Backup volume $volume_name? (y/N): "
read -r response
if [[ ! "$response" =~ ^[Yy]$ ]]; then
log_info "Skipping $volume_name backup (user choice)"
return 0
fi
fi
# Confirm volume exists
if ! check_volume_exists "$volume_name"; then
log_file_details "$volume_name" "$archive" "FAILED - Volume not found"
return 1
fi
# Create destination directory
mkdir -p "$BACKUP_DEST"
log_info "Archiving volume $volume_name to $archive"
# Use a minimal Alpine container to tar up the volume contents
if docker run --rm \
--volume "${volume_name}:${mount_path}:ro" \
alpine \
tar czf - -C "$(dirname "$mount_path")" "$(basename "$mount_path")" \
> "$archive" 2>>"$LOG_FILE"; then
log_success "Volume archive created: $archive"
# File-level metrics tracking
if [[ "$METRICS_ENABLED" == "true" ]]; then
local file_size
file_size=$(stat -c%s "$archive" 2>/dev/null || echo "0")
local checksum
checksum=$(md5sum "$archive" 2>/dev/null | cut -d' ' -f1 || echo "")
metrics_add_file "$archive" "success" "$file_size" "$checksum"
fi
if verify_backup "$volume_name" "$archive"; then
log_file_details "$volume_name" "$archive" "SUCCESS"
track_performance "backup_${volume_name}" "$backup_start_time"
return 0
else
log_file_details "$volume_name" "$archive" "VERIFICATION_FAILED"
if [[ "$METRICS_ENABLED" == "true" ]]; then
local file_size
file_size=$(stat -c%s "$archive" 2>/dev/null || echo "0")
metrics_add_file "$archive" "failed" "$file_size" "" "Verification failed"
fi
return 1
fi
else
log_error "Failed to archive volume: $volume_name"
rm -f "$archive"
log_file_details "$volume_name" "$archive" "FAILED"
if [[ "$METRICS_ENABLED" == "true" ]]; then
metrics_add_file "$archive" "failed" "0" "" "Archive creation failed"
fi
return 1
fi
}
# Stop Karakeep containers before backup
stop_containers() {
log_message "Stopping Karakeep containers for consistent backup..."
if [ ! -f "$COMPOSE_DIR/docker-compose.yml" ]; then
log_error "docker-compose.yml not found at $COMPOSE_DIR"
return 1
fi
local compose_output
if ! compose_output=$(docker compose -f "$COMPOSE_DIR/docker-compose.yml" --progress plain stop 2>&1); then
echo "$compose_output" | tee -a "$LOG_FILE" > /dev/null
log_error "Failed to stop Karakeep containers"
return 1
fi
echo "$compose_output" | tee -a "$LOG_FILE" > /dev/null
log_success "Karakeep containers stopped"
return 0
}
# Start Karakeep containers after backup
start_containers() {
log_message "Starting Karakeep containers..."
local compose_output
if ! compose_output=$(docker compose -f "$COMPOSE_DIR/docker-compose.yml" --progress plain start 2>&1); then
echo "$compose_output" | tee -a "$LOG_FILE" > /dev/null
log_error "Failed to start Karakeep containers - manual intervention required"
return 1
fi
echo "$compose_output" | tee -a "$LOG_FILE" > /dev/null
log_success "Karakeep containers started"
return 0
}
# Copy backup to NAS
copy_to_nas() {
local src="$1"
local nas_dest="${BACKUP_ROOT}/$(basename "$src")"
log_info "Copying backup to NAS: $nas_dest"
mkdir -p "$BACKUP_ROOT"
if cp -r "$src" "$nas_dest" 2>>"$LOG_FILE"; then
log_success "Backup copied to NAS: $nas_dest"
return 0
else
log_error "Failed to copy backup to NAS"
return 1
fi
}
# Copy log files for this run to the NAS logs directory
copy_logs_to_nas() {
if [ "$SKIP_NAS" == true ]; then
return 0
fi
if ! mountpoint -q "/mnt/share/media" 2>/dev/null; then
log_warning "NAS not mounted - skipping log copy to NAS"
return 1
fi
if [ ! -d "$NAS_LOG_DIR" ]; then
if ! mkdir -p "$NAS_LOG_DIR" 2>/dev/null; then
log_warning "Could not create NAS log directory: $NAS_LOG_DIR"
return 1
fi
fi
local copied=0
for log_file in "${LOG_ROOT}/karakeep-backup-${BACKUP_TIMESTAMP}.log" \
"${LOG_ROOT}/karakeep-backup-${BACKUP_TIMESTAMP}.md"; do
if [ -f "$log_file" ]; then
if cp "$log_file" "$NAS_LOG_DIR/" 2>/dev/null; then
log_info "Copied log to NAS: $NAS_LOG_DIR/$(basename "$log_file")"
copied=$((copied + 1))
else
log_warning "Failed to copy log to NAS: $log_file"
fi
fi
done
[ "$copied" -gt 0 ] && log_success "Copied $copied log file(s) to NAS: $NAS_LOG_DIR"
return 0
}
# Clean up old backups
cleanup_old_backups() {
log_message "Cleaning up old backups..."
# Clean local backups: keep only MAX_BACKUPS_TO_KEEP most recent timestamped dirs
find "$LOCAL_BACKUP_DIR" -maxdepth 1 -mindepth 1 -type d | sort -r | \
tail -n +$((MAX_BACKUPS_TO_KEEP + 1)) | xargs rm -rf 2>/dev/null || true
# Clean local backups older than MAX_BACKUP_AGE_DAYS
find "$LOCAL_BACKUP_DIR" -maxdepth 1 -mindepth 1 -type d -mtime +${MAX_BACKUP_AGE_DAYS} | \
xargs rm -rf 2>/dev/null || true
# Clean NAS backups if accessible
if check_nas_mount && [ "$SKIP_NAS" != true ]; then
find "$BACKUP_ROOT" -maxdepth 1 -mindepth 1 -type d | sort -r | \
tail -n +$((MAX_BACKUPS_TO_KEEP + 1)) | xargs rm -rf 2>/dev/null || true
find "$BACKUP_ROOT" -maxdepth 1 -mindepth 1 -type d -mtime +${MAX_BACKUP_AGE_DAYS} | \
xargs rm -rf 2>/dev/null || true
# Clean old NAS karakeep logs
find "$NAS_LOG_DIR" -maxdepth 1 -name "karakeep-backup-*.log" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
find "$NAS_LOG_DIR" -maxdepth 1 -name "karakeep-backup-*.md" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
fi
# Clean up old local log files
find "$LOG_ROOT" -name "karakeep-backup-*.log" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
find "$LOG_ROOT" -name "karakeep-backup-*.md" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
log_success "Cleanup completed"
}
# Send notification
send_notification() {
local title="$1"
local message="$2"
local status="${3:-info}"
local success_count="${4:-0}"
local failed_count="${5:-0}"
local hostname
hostname=$(hostname)
local enhanced_message
printf -v enhanced_message "%s\n\nVolumes: %d\nSuccessful: %d\nFailed: %d\nHost: %s\nBackup: %s" \
"$message" "${#KARAKEEP_VOLUMES[@]}" "$success_count" "$failed_count" "$hostname" "$BACKUP_DEST"
case "$status" in
"success") log_success "$title: $message" ;;
"error") log_error "$title: $message" ;;
"warning") log_warning "$title: $message" ;;
*) log_info "$title: $message" ;;
esac
if [ -n "$WEBHOOK_URL" ] && [ "$DRY_RUN" != true ]; then
local tags="backup,karakeep,${hostname}"
[ "$failed_count" -gt 0 ] && tags="${tags},errors"
curl -s \
-H "tags:${tags}" \
-d "$enhanced_message" \
"$WEBHOOK_URL" 2>/dev/null || log_warning "Failed to send webhook notification"
fi
}
# Generate backup summary report
generate_summary_report() {
local success_count="$1"
local failed_count="$2"
local total_time="$3"
log_message "=== BACKUP SUMMARY REPORT ==="
log_message "Total Volumes: ${#KARAKEEP_VOLUMES[@]}"
log_message "Successful Backups: $success_count"
log_message "Failed Backups: $failed_count"
log_message "Total Time: ${total_time}s"
log_message "Backup Directory: $BACKUP_DEST"
log_message "Log File: $LOG_FILE"
log_message "Markdown Report: $MARKDOWN_LOG"
{
echo "# Karakeep Backup Summary Report"
echo "**Date**: $(date '+%Y-%m-%d %H:%M:%S')"
echo "**Host**: $(hostname)"
echo "**Total Volumes**: ${#KARAKEEP_VOLUMES[@]}"
echo "**Successful**: $success_count"
echo "**Failed**: $failed_count"
echo "**Duration**: ${total_time}s"
echo "**Backup Directory**: $BACKUP_DEST"
echo ""
} >> "$MARKDOWN_LOG"
}
# Main backup execution
main() {
local script_start_time
script_start_time=$(date +%s)
local containers_stopped=false
log_message "=== KARAKEEP BACKUP STARTED ==="
log_message "Host: $(hostname)"
log_message "Timestamp: $BACKUP_TIMESTAMP"
log_message "Dry Run: $DRY_RUN"
log_message "Stop Containers: $STOP_CONTAINERS"
log_message "Verify Backups: $VERIFY_BACKUPS"
log_message "Backup Destination: $BACKUP_DEST"
# Initialize metrics if enabled
if [[ "$METRICS_ENABLED" == "true" ]]; then
metrics_backup_start "karakeep" "Karakeep volume backup (hoarder_data, hoarder_meilisearch)" "$LOCAL_BACKUP_DIR"
metrics_status_update "initializing" "Preparing Karakeep backup"
fi
# Initialize logging
initialize_json_log
{
echo "# Karakeep Backup Report"
echo "**Started**: $(date '+%Y-%m-%d %H:%M:%S')"
echo "**Host**: $(hostname)"
echo "**Backup Timestamp**: $BACKUP_TIMESTAMP"
echo ""
} > "$MARKDOWN_LOG"
# Pre-flight: Docker available?
if ! docker info > /dev/null 2>&1; then
log_error "Docker is not running or not accessible"
if [[ "$METRICS_ENABLED" == "true" ]]; then
metrics_backup_complete "failed" "Docker is not accessible"
fi
send_notification "Karakeep Backup Failed" "Docker is not accessible" "error" 0 "${#KARAKEEP_VOLUMES[@]}"
exit 1
fi
# Pre-flight: disk space check on local backup dir
if [[ "$METRICS_ENABLED" == "true" ]]; then
metrics_status_update "checking" "Running pre-flight checks"
fi
mkdir -p "$LOCAL_BACKUP_DIR"
if ! check_disk_space "$LOCAL_BACKUP_DIR" 500; then
if [[ "$METRICS_ENABLED" == "true" ]]; then
metrics_backup_complete "failed" "Insufficient local disk space"
fi
send_notification "Karakeep Backup Failed" "Insufficient local disk space" "error" 0 "${#KARAKEEP_VOLUMES[@]}"
exit 1
fi
# Ensure containers are restarted on unexpected exit
trap 'if [[ "$containers_stopped" == "true" ]]; then log_warning "Restarting containers after unexpected exit..."; start_containers || true; fi' EXIT INT TERM
# Stop containers for consistent snapshot
if [ "$STOP_CONTAINERS" == true ] && [ "$DRY_RUN" != true ]; then
if [[ "$METRICS_ENABLED" == "true" ]]; then
metrics_status_update "backing_up" "Stopping containers for consistent backup"
fi
if ! stop_containers; then
log_warning "Could not stop containers - proceeding with hot backup"
else
containers_stopped=true
fi
fi
# Back up each volume
if [[ "$METRICS_ENABLED" == "true" ]]; then
metrics_status_update "backing_up" "Archiving Karakeep volumes"
fi
local success_count=0
local failed_count=0
local backup_results=()
for volume_name in "${!KARAKEEP_VOLUMES[@]}"; do
if backup_volume "$volume_name"; then
success_count=$((success_count + 1))
backup_results+=("$volume_name")
else
failed_count=$((failed_count + 1))
backup_results+=("$volume_name")
fi
done
# Restart containers as soon as volumes are archived
if [ "$containers_stopped" == true ]; then
if ! start_containers; then
log_error "CRITICAL: Failed to restart Karakeep containers after backup"
send_notification "Karakeep Backup WARNING" "Containers failed to restart after backup - manual intervention required" "error" "$success_count" "$failed_count"
fi
containers_stopped=false
fi
# Copy to NAS if available and not skipped
if [ "$SKIP_NAS" != true ] && [ "$DRY_RUN" != true ]; then
if [[ "$METRICS_ENABLED" == "true" ]]; then
metrics_status_update "backing_up" "Copying backup to NAS"
fi
if check_nas_mount; then
if ! copy_to_nas "$BACKUP_DEST"; then
log_warning "NAS copy failed - local backup is still available at $BACKUP_DEST"
fi
else
log_warning "NAS not available - backup retained locally at $BACKUP_DEST"
fi
fi
# Calculate elapsed time
local script_end_time
script_end_time=$(date +%s)
local total_time=$((script_end_time - script_start_time))
track_performance "full_karakeep_backup" "$script_start_time" "$script_end_time"
# Clean up old backups
if [ "$DRY_RUN" != true ]; then
if [[ "$METRICS_ENABLED" == "true" ]]; then
metrics_status_update "cleaning_up" "Removing old backup archives"
fi
cleanup_old_backups
fi
# Generate summary
generate_summary_report "$success_count" "$failed_count" "$total_time"
{
echo "## Backup Results"
for result in "${backup_results[@]}"; do
echo "- $result"
done
echo ""
echo "**Completed**: $(date '+%Y-%m-%d %H:%M:%S')"
echo "**Duration**: ${total_time}s"
} >> "$MARKDOWN_LOG"
# Copy logs to NAS
if [ "$DRY_RUN" != true ]; then
copy_logs_to_nas
fi
# Send notification
local status="success"
local message="Karakeep backup completed successfully (${success_count}/${#KARAKEEP_VOLUMES[@]} volumes)"
if [ "$DRY_RUN" == true ]; then
message="Karakeep backup dry run completed"
status="info"
elif [ "$failed_count" -gt 0 ]; then
status="warning"
message="Karakeep backup completed with $failed_count failure(s)"
fi
send_notification "Karakeep Backup Complete" "$message" "$status" "$success_count" "$failed_count"
# Finalize metrics
if [[ "$METRICS_ENABLED" == "true" ]]; then
if [ "$failed_count" -gt 0 ]; then
metrics_backup_complete "completed_with_errors" "Karakeep backup completed with $failed_count failure(s)"
elif [ "$DRY_RUN" == true ]; then
metrics_backup_complete "success" "Karakeep backup dry run completed"
else
metrics_backup_complete "success" "Karakeep backup completed successfully"
fi
fi
if [ "$failed_count" -gt 0 ]; then
exit 1
fi
log_success "All Karakeep volume backups completed successfully!"
exit 0
}
main "$@"


@@ -6,13 +6,13 @@
set -e
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
PURPLE='\033[0;35m'
NC='\033[0m' # No Color
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
CYAN=$'\033[0;36m'
PURPLE=$'\033[0;35m'
NC=$'\033[0m' # No Color
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
LOG_DIR="$SCRIPT_DIR/logs"
@@ -23,7 +23,7 @@ HOSTNAME=$(hostname)
mkdir -p "$LOG_DIR" "$REPORT_DIR"
# Backup service tags for monitoring
BACKUP_TAGS=("plex-backup" "backup-move" "plex-validation" "immich-backup" "plex-report" "crontab-backup")
BACKUP_TAGS=("plex-backup" "backup-move" "plex-validation" "immich-backup" "plex-report" "crontab-backup" "karakeep-backup")
log_message() {
echo -e "$(date '+%H:%M:%S') $1"
@@ -207,10 +207,11 @@ check_backup_health() {
# Check if backup scripts exist
local backup_scripts=(
"/home/acedanger/shell/backup-plex.sh"
"/home/acedanger/shell/move-backups.sh"
"/home/acedanger/shell/validate-plex-backups.sh"
"/home/acedanger/shell/crontab/crontab-backup-system.sh"
"/home/acedanger/shell/backup-karakeep.sh"
"/home/acedanger/shell/backup-gitea.sh"
"/home/acedanger/shell/backup-docker.sh"
"/home/acedanger/shell/backup-media.sh"
"/home/acedanger/shell/backup-env-files.sh"
)
for script in "${backup_scripts[@]}"; do
@@ -225,9 +226,9 @@ check_backup_health() {
# Check if backup directories exist
local backup_dirs=(
"/mnt/share/media/backups/plex"
"/mnt/share/media/backups/docker-data"
"/mnt/share/media/backups/karakeep"
"/mnt/share/media/backups/immich"
"/mnt/share/media/backups/logs"
)
for dir in "${backup_dirs[@]}"; do
@@ -314,6 +315,11 @@ show_service_status() {
local last_entry
last_entry=$(sudo journalctl -t "$tag" --output=short-iso -n 1 2>/dev/null | tail -1)
# Filter out journalctl informational messages (e.g., "-- No entries --")
if [[ "$last_entry" =~ ^-- ]]; then
last_entry=""
fi
if [ -n "$last_entry" ]; then
local timestamp
local message
@@ -327,18 +333,28 @@ show_service_status() {
current_time=$(date +%s)
local hours_diff=$(( (current_time - entry_time) / 3600 ))
local status
local status_word status
if [ "$hours_diff" -le 24 ]; then
status="${GREEN}Active${NC}"
status_word="Active"
status="${GREEN}${status_word}${NC}"
elif [ "$hours_diff" -le 48 ]; then
status="${YELLOW}Recent${NC}"
status_word="Recent"
status="${YELLOW}${status_word}${NC}"
else
status="${RED}Stale${NC}"
status_word="Stale"
status="${RED}${status_word}${NC}"
fi
printf "%-20s %-25s %-20s %-30s\n" "$tag" "$status" "$timestamp" "$message"
# Adjust printf width to account for invisible ANSI escape code bytes
local ansi_overhead=$(( ${#status} - ${#status_word} ))
local padded_width=$(( 15 + ansi_overhead ))
printf "%-20s %-${padded_width}s %-20s %-30s\n" "$tag" "$status" "$timestamp" "$message"
else
printf "%-20s %-25s %-20s %-30s\n" "$tag" "${RED}No logs${NC}" "Never" "No activity found"
local no_logs_word="No logs"
local no_logs="${RED}${no_logs_word}${NC}"
local ansi_overhead=$(( ${#no_logs} - ${#no_logs_word} ))
local padded_width=$(( 15 + ansi_overhead ))
printf "%-20s %-${padded_width}s %-20s %-30s\n" "$tag" "$no_logs" "Never" "No activity found"
fi
done
}
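The padding adjustment above is needed because printf counts the invisible ANSI escape bytes in a colored string, which makes colored cells narrower than plain ones. A self-contained sketch of the alignment fix, with made-up tags and a fixed timestamp:

```bash
#!/usr/bin/env bash
# Sketch only: widen the field by the escape-byte overhead so visible columns align.
GREEN=$'\033[0;32m'
RED=$'\033[0;31m'
NC=$'\033[0m'

print_row() {
    local tag="$1" status_word="$2" color="$3"
    local status="${color}${status_word}${NC}"
    local ansi_overhead=$(( ${#status} - ${#status_word} ))   # invisible bytes
    local padded_width=$(( 15 + ansi_overhead ))
    printf "%-20s %-${padded_width}s %-20s\n" "$tag" "$status" "2026-03-26 03:00"
}

printf "%-20s %-15s %-20s\n" "TAG" "STATUS" "LAST RUN"
print_row "karakeep-backup" "Active" "$GREEN"
print_row "plex-backup"     "Stale"  "$RED"
```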


@@ -11,6 +11,9 @@
# Backs up Docker container configurations and data
30 2 * * * { echo "Starting Docker backup"; /home/acedanger/shell/backup-media.sh; echo "Docker media backup completed with exit code: $?"; } 2>&1 | logger -t docker-media-backup -p user.info
# Daily Karakeep backup at 0300 with logging
# Backs up hoarder_data and hoarder_meilisearch volumes to NAS
0 3 * * * { echo "Starting Karakeep backup"; /home/acedanger/shell/backup-karakeep.sh; echo "Karakeep backup completed with exit code: $?"; } 2>&1 | logger -t karakeep-backup -p user.info
# Daily system backup at 0400 with auto-cleanup
0 4 * * * /home/acedanger/shell/crontab/crontab-backup-system.sh backup auto --auto-cleanup 2>&1 | logger -t crontab-backup -p user.info
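Each crontab entry above funnels the job's stdout and stderr through `logger` with a tag, which is what backup-log-monitor later queries via `journalctl -t`. A minimal sketch of the round trip, using a throwaway tag and assuming a systemd journal:

```bash
#!/usr/bin/env bash
# Sketch only: write a tagged journal entry, then read the latest one back.
{ echo "Starting example backup"; true; echo "Example backup completed with exit code: $?"; } 2>&1 \
    | logger -t example-backup -p user.info

# Reading the journal may require elevated privileges, depending on the system.
journalctl -t example-backup --output=short-iso -n 1 --no-pager || true
```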


@@ -9,7 +9,10 @@ Usage Examples:
1. List all running containers:
$ dm list
2. Update a specific project (pulls latest images and recreates containers):
2. List only stacks with updates available:
$ dm list --update
3. Update a specific project (pulls latest images and recreates containers):
$ dm update media-server
3. Stop all projects (with confirmation prompt):
@@ -171,7 +174,7 @@ def run_command(cmd, cwd, capture_output=False):
return None
def list_containers(projects):
def list_containers(projects, show_updates_only=False):
"""List running containers for all projects."""
table = Table(title="Docker Containers", box=box.ROUNDED)
table.add_column("Project", style="cyan", no_wrap=True)
@@ -186,6 +189,11 @@ def list_containers(projects):
found_any = False
for name, path in sorted(projects.items()):
# Buffer for project rows
project_rows = []
project_has_update = False
# Get running container names
cmd = ["docker", "compose", "ps", "--format", "json"]
res = run_command(cmd, path, capture_output=True)
@@ -204,8 +212,6 @@ def list_containers(projects):
if not c_name:
continue
found_any = True
# Get version and update status
version = "unknown"
update_status = ""
@@ -214,7 +220,7 @@ def list_containers(projects):
norm_image = image
tag = "latest"
# Remove tag if present (heuristic: split on last colon, if right side has no slashes)
# Remove tag if present
if ":" in norm_image:
base, sep, t = norm_image.rpartition(":")
if sep and "/" not in t:
@@ -231,27 +237,21 @@ def list_containers(projects):
if norm_image in diun_info:
image_tags = diun_info[norm_image]
# Only proceed if we have info for this specific tag
if tag in image_tags:
info = image_tags[tag]
# Try to get version from Diun labels first
version = get_image_version(info.get("labels", {}))
# Check for updates
# First get the Image ID of the running container
inspect_id_cmd = ["docker", "inspect", c_name, "--format", "{{.Image}}"]
inspect_id_res = run_command(inspect_id_cmd, path, capture_output=True)
if inspect_id_res and inspect_id_res.returncode == 0:
image_id = inspect_id_res.stdout.strip()
# Now inspect the Image ID to get RepoDigests
inspect_digest_cmd = ["docker", "inspect", image_id, "--format", "{{if .RepoDigests}}{{index .RepoDigests 0}}{{end}}"]
inspect_digest_res = run_command(inspect_digest_cmd, path, capture_output=True)
if inspect_digest_res and inspect_digest_res.returncode == 0:
running_digest_full = inspect_digest_res.stdout.strip()
# running_digest is like name@sha256:hash
if "@" in running_digest_full:
running_digest = running_digest_full.split("@")[1]
latest_digest = info.get("digest", "")
@@ -259,6 +259,7 @@ def list_containers(projects):
if latest_digest:
if running_digest != latest_digest:
update_status = "[bold red]Update Available[/bold red]"
project_has_update = True
else:
update_status = "[green]Up to Date[/green]"
else:
@@ -266,7 +267,6 @@ def list_containers(projects):
else:
update_status = "[dim]Not Monitored[/dim]"
# If version is still unknown, try to get from running container labels
if version in ("latest", "unknown"):
inspect_cmd = ["docker", "inspect", c_name, "--format", "{{json .Config.Labels}}"]
inspect_res = run_command(inspect_cmd, path, capture_output=True)
@@ -277,13 +277,25 @@ def list_containers(projects):
except Exception:
pass
table.add_row(name, c_name, state, image, version, update_status)
project_rows.append((name, c_name, state, image, version, update_status))
except json.JSONDecodeError:
pass
# If not hiding, OR if update, add to table
if not show_updates_only or project_has_update:
if project_rows:
if found_any:
table.add_section()
found_any = True
for row in project_rows:
table.add_row(*row)
if found_any:
console.print(table)
else:
if show_updates_only:
console.print("[green]No updates available for running containers.[/green]")
else:
console.print("[yellow]No running containers found in managed projects.[/yellow]")
@@ -544,7 +556,8 @@ def main():
dest="command", help="Command to execute")
# List command configuration
subparsers.add_parser("list", help="List running containers")
list_parser = subparsers.add_parser("list", help="List running containers")
list_parser.add_argument("--update", action="store_true", help="Show only stacks that have updates available")
# Describe command configuration
describe_parser = subparsers.add_parser("describe", help="Show details of a project's containers")
@@ -596,7 +609,7 @@ def main():
# Dispatch commands
if args.command == "list":
list_containers(projects)
list_containers(projects, show_updates_only=args.update)
elif args.command == "describe":
describe_project(projects, args.project)
elif args.command == "volumes":
@@ -636,4 +649,7 @@ def main():
if __name__ == "__main__":
try:
main()
finally:
console.show_cursor(True)


@@ -53,13 +53,13 @@ cleanup() {
trap cleanup EXIT SIGINT SIGTERM
# Load environment variables from the .env file
ENV_FILE="$(dirname "$0")/../.env"
ENV_FILE="${SCRIPT_DIR}/../.env"
if [ -f "$ENV_FILE" ]; then
echo "Loading environment variables from $ENV_FILE"
# shellcheck source=/dev/null
source "$ENV_FILE"
else
echo "Error: .env file not found in $(dirname "$0")/.."
echo "Error: .env file not found in ${SCRIPT_DIR}/.."
exit 1
fi
@@ -109,6 +109,7 @@ EXAMPLES:
$(basename "$0") --help # Show this help
$(basename "$0") --dry-run # Preview backup without executing
$(basename "$0") --no-upload # Backup locally only (skip B2)
$(basename "$0") --upload-only # Only upload the latest existing backup to B2
RESTORE INSTRUCTIONS:
https://immich.app/docs/administration/backup-and-restore/
@@ -119,6 +120,7 @@ EOF
# Parse command line arguments
DRY_RUN=false
NO_UPLOAD=false
UPLOAD_ONLY=false
VERBOSE=false
while [[ $# -gt 0 ]]; do
@@ -135,6 +137,10 @@ while [[ $# -gt 0 ]]; do
NO_UPLOAD=true
shift
;;
--upload-only)
UPLOAD_ONLY=true
shift
;;
--verbose)
VERBOSE=true
shift
@@ -148,12 +154,12 @@ while [[ $# -gt 0 ]]; do
done
# B2 CLI tool path
if [ -f "$(dirname "$0")/b2-linux" ]; then
B2_CLI="$(dirname "$0")/b2-linux"
if [ -f "${SCRIPT_DIR}/b2-linux" ]; then
B2_CLI="${SCRIPT_DIR}/b2-linux"
elif command -v b2 &> /dev/null; then
B2_CLI=$(command -v b2)
else
B2_CLI="$(dirname "$0")/b2-linux"
B2_CLI="${SCRIPT_DIR}/b2-linux"
fi
# Notification function
@@ -204,17 +210,40 @@ upload_to_b2() {
log_message "Uploading $filename to B2 bucket: $B2_BUCKET_NAME"
# Authorize B2 account
if ! "$B2_CLI" authorize-account "$B2_APPLICATION_KEY_ID" "$B2_APPLICATION_KEY" 2>/dev/null; then
local auth_output
if ! auth_output=$("$B2_CLI" authorize-account "$B2_APPLICATION_KEY_ID" "$B2_APPLICATION_KEY" 2>&1); then
log_message "Error: Failed to authorize B2 account"
log_message "B2 Output: $auth_output"
return 1
fi
# Upload file to B2
if "$B2_CLI" upload-file "$B2_BUCKET_NAME" "$file_path" "immich-backups/$filename" 2>/dev/null; then
local temp_log
temp_log=$(mktemp)
# Enable pipefail to catch b2 exit code through tee
set -o pipefail
# Use --threads 4 to avoid "More than one concurrent upload using auth token" error
# which can happen with default thread count on large files
if "$B2_CLI" file upload --threads 4 "$B2_BUCKET_NAME" "$file_path" "immich-backups/$filename" 2>&1 | tee "$temp_log"; then
set +o pipefail
rm "$temp_log"
log_message "✅ Successfully uploaded $filename to B2"
return 0
else
local exit_code=$?
set +o pipefail
log_message "❌ Failed to upload $filename to B2"
# Log the last few lines of output to capture the error message
# avoiding the progress bar spam
local error_msg
error_msg=$(tail -n 20 "$temp_log")
log_message "B2 Output (last 20 lines):"
log_message "$error_msg"
rm "$temp_log"
return 1
fi
}
@@ -223,7 +252,7 @@ upload_to_b2() {
IMMICH_SERVER_RUNNING=true
# Set up logging to central logs directory
LOG_DIR="$(dirname "$0")/../logs"
LOG_DIR="${SCRIPT_DIR}/../logs"
mkdir -p "$LOG_DIR"
LOG_FILE="${LOG_DIR}/immich-backup.log"
@@ -231,6 +260,11 @@ LOG_FILE="${LOG_DIR}/immich-backup.log"
log_message() {
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "$LOG_FILE"
}
# Function to log status (wrapper for log_message)
log_status() {
log_message "$1"
}
# Create backup directory if it doesn't exist
BACKUP_DIR="$(dirname "$0")/../immich_backups"
mkdir -p "$BACKUP_DIR"
@@ -238,10 +272,35 @@ mkdir -p "$BACKUP_DIR"
# Shared backup directory (can be overridden in .env)
SHARED_BACKUP_DIR="${SHARED_BACKUP_DIR:-/mnt/share/media/backups/immich}"
# Generate timestamp for the backup filename
# Handle upload-only mode
if [ "$UPLOAD_ONLY" = true ]; then
log_message "=== UPLOAD ONLY MODE ==="
log_message "Skipping backup creation, looking for latest backups in $SHARED_BACKUP_DIR"
# Find latest database backup
LATEST_DB=$(ls -t "$SHARED_BACKUP_DIR"/immich_db_backup_*.sql.gz 2>/dev/null | head -n1)
if [ -f "$LATEST_DB" ]; then
log_message "Found latest database backup: $LATEST_DB"
upload_to_b2 "$LATEST_DB"
else
log_message "Warning: No database backup found in $SHARED_BACKUP_DIR"
fi
# Find latest uploads backup
LATEST_UPLOADS=$(ls -t "$SHARED_BACKUP_DIR"/immich_uploads_*.tar.gz 2>/dev/null | head -n1)
if [ -f "$LATEST_UPLOADS" ]; then
log_message "Found latest uploads backup: $LATEST_UPLOADS"
upload_to_b2 "$LATEST_UPLOADS"
else
log_message "Warning: No uploads backup found in $SHARED_BACKUP_DIR"
fi
log_message "Upload only mode completed."
exit 0
fi
# Create backup directory if it doesn't exist
BACKUP_DIR="$(dirname "$0")/../immich_backups"
BACKUP_DIR="${SCRIPT_DIR}/../immich_backups"
mkdir -p "$BACKUP_DIR"
# Generate timestamp for the backup filename
@@ -441,8 +500,9 @@ log_message "Creating compressed archive of upload directory..."
log_message "This may take a while depending on the size of your media library..."
# Use tar with progress indication and exclude any existing backup files in the upload location
if ! tar --exclude="${UPLOAD_LOCATION}/backups/*.tar.gz" \
--exclude="${UPLOAD_LOCATION}/backups/*.sql.gz" \
# Note: Exclude patterns must match the relative path structure used by -C
if ! tar --exclude="$(basename "${UPLOAD_LOCATION}")/backups/*.tar.gz" \
--exclude="$(basename "${UPLOAD_LOCATION}")/backups/*.sql.gz" \
-czf "${UPLOAD_BACKUP_PATH}" \
-C "$(dirname "${UPLOAD_LOCATION}")" \
"$(basename "${UPLOAD_LOCATION}")"; then

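The exclude-pattern note above matters because tar matches excludes against member names as it records them: with `-C parent basename`, members are stored as `basename/...`, so absolute-path patterns never match. A throwaway-path sketch:

```bash
#!/usr/bin/env bash
# Sketch only: exclude patterns written relative to the -C basename do match.
set -e

UPLOAD_LOCATION="/tmp/example-upload"   # placeholder library path
mkdir -p "$UPLOAD_LOCATION/backups" "$UPLOAD_LOCATION/photos"
echo "media"    > "$UPLOAD_LOCATION/photos/img.jpg"
echo "old dump" > "$UPLOAD_LOCATION/backups/old.tar.gz"

ARCHIVE="/tmp/example-upload.tar.gz"
tar --exclude="$(basename "$UPLOAD_LOCATION")/backups/*.tar.gz" \
    -czf "$ARCHIVE" \
    -C "$(dirname "$UPLOAD_LOCATION")" \
    "$(basename "$UPLOAD_LOCATION")"

tar -tzf "$ARCHIVE"   # lists example-upload/photos/img.jpg, not backups/old.tar.gz
```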
jellyfin/repair_jellyfin_db.sh Executable file (97 lines added)

@@ -0,0 +1,97 @@
#!/bin/bash
set -e
# Configuration
CONTAINER_NAME="jellyfin"
DB_PATH_IN_CONTAINER="/config/data"
DB_FILES=("library.db" "jellyfin.db")
BACKUP_DIR="/tmp/jellyfin_db_backup_$(date +%Y%m%d_%H%M%S)"
REPAIR_DIR="/tmp/jellyfin_db_repair"
# --- Functions ---
# Function to print messages
log() {
echo "[$(date +'%Y-%m-%d %H:%M:%S')] $1"
}
# Function to stop the Jellyfin container
stop_container() {
log "Stopping Jellyfin container..."
docker stop "$CONTAINER_NAME"
}
# Function to start the Jellyfin container
start_container() {
log "Starting Jellyfin container..."
docker start "$CONTAINER_NAME"
}
# Function to create a backup of the database files
backup_database() {
log "Backing up database files to $BACKUP_DIR..."
mkdir -p "$BACKUP_DIR"
for db_file in "${DB_FILES[@]}"; do
docker cp "${CONTAINER_NAME}:${DB_PATH_IN_CONTAINER}/${db_file}" "$BACKUP_DIR/"
done
}
# Function to repair a database file
repair_database() {
local db_file="$1"
local db_path_in_repair_dir="${REPAIR_DIR}/${db_file}"
local sql_dump_file="${REPAIR_DIR}/${db_file}.sql"
local new_db_file="${REPAIR_DIR}/${db_file}.new"
log "Repairing ${db_file}..."
# Check for corruption
log "Running integrity check on ${db_file}..."
if sqlite3 "$db_path_in_repair_dir" "PRAGMA integrity_check;" | grep -q "ok"; then
log "${db_file} is not corrupted. Skipping repair."
return
fi
log "Dumping ${db_file} to SQL file..."
sqlite3 "$db_path_in_repair_dir" .dump > "$sql_dump_file"
log "Creating new database from SQL dump..."
sqlite3 "$new_db_file" < "$sql_dump_file"
log "Replacing old database with the new one..."
mv "$new_db_file" "$db_path_in_repair_dir"
}
# --- Main Script ---
# Stop the container
stop_container
# Create repair directory
mkdir -p "$REPAIR_DIR"
# Copy database files to repair directory
log "Copying database files to repair directory..."
for db_file in "${DB_FILES[@]}"; do
docker cp "${CONTAINER_NAME}:${DB_PATH_IN_CONTAINER}/${db_file}" "$REPAIR_DIR/"
done
# Repair each database file
for db_file in "${DB_FILES[@]}"; do
repair_database "$db_file"
done
# Copy repaired files back to the container
log "Copying repaired files back to the container..."
for db_file in "${DB_FILES[@]}"; do
docker cp "${REPAIR_DIR}/${db_file}" "${CONTAINER_NAME}:${DB_PATH_IN_CONTAINER}/${db_file}"
done
# Clean up repair directory
rm -rf "$REPAIR_DIR"
# Start the container
start_container
log "Database repair process completed."


@@ -0,0 +1,18 @@
#!/bin/bash
# Define the list of script names
scripts=("backup-update-master.sh" "backup-ba-up-ma.sh" "config-ba-up-ma.sh" "cron-ba-up-ma.sh" "update-ba-up-ma.sh" "utils-ba-up-ma.sh")
# Define the base URL for the Gist raw files
base_url="https://raw.githubusercontent.com/hhftechnology/pangolin-backup-update/refs/heads/main"
# Download each script
for script in "${scripts[@]}"; do
curl -o "$script" "$base_url/$script"
done
# Make all .sh files executable
chmod +x *.sh
# Run the master script
./backup-update-master.sh

plex/.gitignore vendored Normal file (3 lines added)

@@ -0,0 +1,3 @@
.venv/
__pycache__/
*.pyc


@@ -1,130 +0,0 @@
# Plex Database Corruption Resolution Summary
## ✅ ISSUE RESOLVED: Auto-Repair Cycle Causing Corruption
### Root Cause Identified
The primary cause of your Plex database corruption was an **aggressive auto-repair schedule** running every 30 minutes via cron:
```bash
# PROBLEMATIC (FIXED):
*/30 * * * * /home/acedanger/shell/plex/backup-plex.sh --check-integrity --auto-repair
```
This caused:
- 48+ service stops/starts per day
- WAL file manipulation conflicts
- Repair cascading failures
- Race conditions during service transitions
### ✅ Changes Applied
#### 1. **Fixed Crontab Schedule**
- **Before**: Auto-repair every 30 minutes + daily backup with auto-repair
- **After**:
- Daily read-only integrity check (6 AM)
- Daily backup with auto-repair **disabled** (4:15 AM)
- Manual repair intervention required
#### 2. **Disabled Auto-Repair Default**
- Changed `backup-plex.sh` default from `AUTO_REPAIR=true` to `AUTO_REPAIR=false`
- Prevents automatic repair loops that were causing corruption
#### 3. **Created Consolidated Management Tool**
- New script: `plex-db-manager.sh`
- Safe, read-only integrity checking
- Manual repair intervention (currently disabled for safety)
- Proper service management with synchronization
#### 4. **Database Status** ✅
Current check shows: **ALL DATABASES HEALTHY**
- Main database: integrity check PASSED
- Blobs database: integrity check PASSED
## 📋 Script Redundancy Analysis
### Scripts with Overlapping Functionality
1. **`plex.sh`** - Service management + basic repair
2. **`backup-plex.sh`** - Backup + auto-repair logic
3. **`plex-database-repair.sh`** - Dedicated repair functions
4. **`recover-plex-database.sh`** - Advanced recovery methods
5. **`nuclear-plex-recovery.sh`** - Nuclear recovery
6. **`restore-plex.sh`** - Backup restoration
### Consolidation Recommendations
#### Keep Active:
- **`backup-plex.sh`** - Primary backup (with auto-repair disabled)
- **`plex-db-manager.sh`** - New consolidated management tool
- **`plex.sh`** - Basic service management
- **`nuclear-plex-recovery.sh`** - Last resort recovery
#### Consider Deprecating:
- **`plex-database-repair.sh`** - Functionality moved to `plex-db-manager.sh`
- **`recover-plex-database.sh`** - Similar functionality in other scripts
- **`restore-plex.sh`** - Basic functionality covered elsewhere
## 🛡️ Prevention Measures Implemented
### 1. **Conservative Backup Schedule**
```bash
# Read-only check (daily at 6 AM)
0 6 * * * backup-plex.sh --check-integrity --disable-auto-repair
# Backup without auto-repair (daily at 4:15 AM)
15 4 * * * backup-plex.sh --non-interactive --disable-auto-repair
```
### 2. **Manual Intervention Required**
- No automatic repairs unless explicitly requested
- All repair operations require manual approval
- Comprehensive logging for audit trail
### 3. **Safe Service Management**
- Proper service stop/start synchronization
- Extended timeouts for clean shutdowns
- Race condition prevention
## 📊 Expected Improvements
1. **Stability**: Eliminated 47 daily service interruptions
2. **Reliability**: No more auto-repair corruption loops
3. **Performance**: Reduced I/O load on database files
4. **Maintainability**: Centralized database management
## 🔧 Usage Going Forward
### Regular Monitoring:
```bash
# Check database health (safe, read-only)
./plex-db-manager.sh check
```
### If Issues Detected:
```bash
# View detailed logs
tail -f /home/acedanger/shell/plex/logs/plex-backup-$(date +%Y-%m-%d).log
# Manual repair (when re-enabled)
./plex-db-manager.sh repair
```
### Emergency Recovery:
```bash
# Only if all else fails
sudo ./nuclear-plex-recovery.sh --auto
```
## ⚠️ Critical Notes
1. **Auto-repair is temporarily disabled** until stability is confirmed
2. **Manual intervention required** for any database issues
3. **Monitor logs closely** for the next week to ensure stability
4. **Backup integrity** should improve significantly
---
**Date Fixed**: June 21, 2025
**Issue**: 30-minute auto-repair cycle causing database corruption
**Resolution**: Disabled aggressive auto-repair, implemented safe backup schedule
**Status**: ✅ RESOLVED - Databases currently healthy

View File

@@ -51,12 +51,12 @@
# Critical operations use explicit error checking instead of automatic exit
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
CYAN=$'\033[0;36m'
NC=$'\033[0m' # No Color
# Performance tracking variables (removed unused variables)
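
The recurring change in these color-definition hunks swaps plain single quotes for ANSI-C quoting (`$'...'`), so each variable stores the actual escape byte rather than the literal characters `\033`. That makes the colors render with plain `echo` and `printf '%s'` instead of relying on `echo -e`. A minimal sketch of the difference:

```bash
#!/bin/bash
# Plain quotes: the variable holds the characters backslash-0-3-3 literally.
RED='\033[0;31m'
printf '%s\n' "${RED}not colored"   # prints \033[0;31m... verbatim

# ANSI-C quoting: the variable holds the real ESC byte.
RED=$'\033[0;31m'
NC=$'\033[0m'
printf '%s\n' "${RED}rendered in red${NC}"
```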

View File

@@ -15,11 +15,11 @@
################################################################################
# Color codes
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m' # No Color
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
RED=$'\033[0;31m'
NC=$'\033[0m' # No Color
# Plex database path
PLEX_DB_PATH="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"

View File

@@ -12,11 +12,11 @@
set +e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
NC=$'\033[0m' # No Color
# Configuration
PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"

View File

@@ -55,12 +55,12 @@
set -e
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
CYAN=$'\033[0;36m'
NC=$'\033[0m' # No Color
# Test configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"

View File

@@ -49,13 +49,13 @@
set -e
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
MAGENTA='\033[0;35m'
NC='\033[0m' # No Color
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
CYAN=$'\033[0;36m'
MAGENTA=$'\033[0;35m'
NC=$'\033[0m' # No Color
# Configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"

View File

@@ -58,14 +58,15 @@
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
NC=$'\033[0m' # No Color
# Configuration
PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"
PLEX_USER="plex"
PLEX_GROUP="plex"
BACKUP_TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
@@ -244,27 +245,66 @@ restore_from_backup() {
fi
}
# Function to verify restored databases
# Function to verify restored databases (structural + FTS)
verify_databases() {
print_status "$YELLOW" "Verifying restored databases..."
# Check main database
if sqlite3 "${PLEX_DB_DIR}/com.plexapp.plugins.library.db" "PRAGMA integrity_check;" | grep -q "ok"; then
print_status "$GREEN" "Main database integrity check: PASSED"
else
print_status "$RED" "Main database integrity check: FAILED"
return 1
# Use Plex's bundled SQLite for ICU compatibility; fall back to system sqlite3
local sqlite_bin="sqlite3"
if [[ -x "$PLEX_SQLITE" ]]; then
sqlite_bin="$PLEX_SQLITE"
fi
# Check blobs database
if sqlite3 "${PLEX_DB_DIR}/com.plexapp.plugins.library.blobs.db" "PRAGMA integrity_check;" | grep -q "ok"; then
print_status "$GREEN" "Blobs database integrity check: PASSED"
else
print_status "$RED" "Blobs database integrity check: FAILED"
return 1
local overall_ok=true
for db_file in \
"${PLEX_DB_DIR}/com.plexapp.plugins.library.db" \
"${PLEX_DB_DIR}/com.plexapp.plugins.library.blobs.db"; do
local db_name
db_name=$(basename "$db_file")
if [[ ! -f "$db_file" ]]; then
print_status "$RED" "$db_name: NOT FOUND"
overall_ok=false
continue
fi
# Structural integrity
local result
result=$("$sqlite_bin" "$db_file" "PRAGMA integrity_check;" 2>&1)
if [[ "$result" == "ok" ]]; then
print_status "$GREEN" "$db_name structural integrity: PASSED"
else
print_status "$RED" "$db_name structural integrity: FAILED"
overall_ok=false
fi
# FTS index integrity
local fts_tables
fts_tables=$("$sqlite_bin" "$db_file" \
"SELECT name FROM sqlite_master WHERE type='table' AND sql LIKE '%fts%';" 2>/dev/null) || true
if [[ -n "$fts_tables" ]]; then
while IFS= read -r table; do
[[ -z "$table" ]] && continue
local fts_result
fts_result=$("$sqlite_bin" "$db_file" \
"INSERT INTO ${table}(${table}) VALUES('integrity-check');" 2>&1) || true
if [[ -n "$fts_result" ]]; then
print_status "$RED" "$db_name FTS index '$table': DAMAGED"
overall_ok=false
fi
done <<< "$fts_tables"
fi
done
if [[ "$overall_ok" == true ]]; then
print_status "$GREEN" "All database integrity checks passed!"
return 0
else
print_status "$RED" "One or more database checks failed!"
return 1
fi
}
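
For context on the FTS checks added above: SQLite's `PRAGMA integrity_check` only validates the core b-tree structures, while FTS3/4/5 virtual tables accept a special `'integrity-check'` command issued as an INSERT into the table's own name (and `'rebuild'` to repair it). A rough manual spot-check, assuming the Plex-bundled SQLite binary and the stock database path, with table names discovered at run time rather than hard-coded:

```bash
#!/bin/bash
PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"
DB="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db"

# Structural check (prints "ok" when the b-trees are sound)
sudo -u plex "$PLEX_SQLITE" "$DB" "PRAGMA integrity_check;"

# Check every FTS virtual table; an error from the INSERT means that index is damaged
sudo -u plex "$PLEX_SQLITE" "$DB" \
  "SELECT name FROM sqlite_master WHERE type='table' AND sql LIKE '%fts%';" |
while IFS= read -r t; do
  [[ -n "$t" ]] || continue
  sudo -u plex "$PLEX_SQLITE" "$DB" \
    "INSERT INTO ${t}(${t}) VALUES('integrity-check');" \
    || echo "FTS index ${t} is damaged; rebuild it with VALUES('rebuild')"
done
```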
# Function to fix ownership issues

View File

@@ -34,15 +34,15 @@
set -euo pipefail
# Color codes for output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly WHITE='\033[1;37m'
readonly BOLD='\033[1m'
readonly DIM='\033[2m'
readonly RESET='\033[0m'
readonly RED=$'\033[0;31m'
readonly GREEN=$'\033[0;32m'
readonly YELLOW=$'\033[1;33m'
readonly BLUE=$'\033[0;34m'
readonly CYAN=$'\033[0;36m'
readonly WHITE=$'\033[1;37m'
readonly BOLD=$'\033[1m'
readonly DIM=$'\033[2m'
readonly RESET=$'\033[0m'
# Configuration
readonly PLEX_SERVICE="plexmediaserver"
@@ -56,6 +56,251 @@ SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
readonly SCRIPT_DIR
readonly LOG_FILE="$SCRIPT_DIR/logs/db-manager-$(date +%Y%m%d_%H%M%S).log"
# DBRepair.sh location — searched in order: script dir, database dir, /usr/local/bin
readonly DBREPAIR_INSTALL_PATH="${SCRIPT_DIR}/DBRepair.sh"
readonly DBREPAIR_GITHUB_API="https://api.github.com/repos/ChuckPa/PlexDBRepair/releases"
readonly DBREPAIR_DOWNLOAD_BASE="https://github.com/ChuckPa/PlexDBRepair/releases/download"
find_dbrepair() {
local candidates=(
"${SCRIPT_DIR}/DBRepair.sh"
"${PLEX_DB_DIR}/DBRepair.sh"
"/usr/local/bin/DBRepair.sh"
)
for path in "${candidates[@]}"; do
if [[ -x "$path" ]]; then
echo "$path"
return 0
fi
done
return 1
}
# Get the latest non-beta release tag from GitHub
_dbrepair_latest_release_tag() {
local tag
tag=$(curl -fsSL "${DBREPAIR_GITHUB_API}" 2>/dev/null \
| grep -Eo '"tag_name"\s*:\s*"[^"]+"' \
| head -n 1 \
| sed 's/"tag_name"\s*:\s*"//;s/"//')
if [[ -z "$tag" ]]; then
return 1
fi
echo "$tag"
}
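
As written, this helper takes the first `tag_name` from the full releases listing, which can include prereleases. GitHub's `/releases/latest` endpoint already excludes prereleases and drafts, so an alternative sketch (assuming `jq` is available) would be:

```bash
#!/bin/bash
# Alternative tag lookup via the latest-release endpoint (no prereleases or drafts)
latest_tag=$(curl -fsSL "https://api.github.com/repos/ChuckPa/PlexDBRepair/releases/latest" \
  | jq -r '.tag_name')
[[ -n "$latest_tag" && "$latest_tag" != "null" ]] || { echo "release lookup failed" >&2; exit 1; }
echo "Latest DBRepair release: ${latest_tag}"
```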
# Get currently installed DBRepair version
_dbrepair_installed_version() {
local bin="$1"
grep -oP 'Version\s+v\K[0-9.]+' "$bin" 2>/dev/null | head -n 1
}
# Install or update DBRepair.sh
install_or_update_dbrepair() {
log_message "Checking DBRepair (ChuckPa/PlexDBRepair)..."
local latest_tag
if ! latest_tag=$(_dbrepair_latest_release_tag); then
log_error "Failed to query GitHub for the latest release"
log_warning "Check your internet connection or try: https://github.com/ChuckPa/PlexDBRepair/releases"
return 1
fi
log_message "Latest stable release: ${latest_tag}"
local dbrepair_bin
if dbrepair_bin=$(find_dbrepair); then
local installed_ver
installed_ver=$(_dbrepair_installed_version "$dbrepair_bin")
local remote_ver
remote_ver=$(echo "$latest_tag" | sed 's/^v//')
if [[ -n "$installed_ver" ]]; then
log_message "Installed version: v${installed_ver} at ${dbrepair_bin}"
if [[ "$installed_ver" == "$remote_ver" ]]; then
log_success "DBRepair is already up to date (v${installed_ver})"
return 0
else
log_warning "Update available: v${installed_ver} -> ${latest_tag}"
fi
else
log_warning "Installed at ${dbrepair_bin} (version unknown), will update"
fi
else
log_message "DBRepair not found — installing to ${DBREPAIR_INSTALL_PATH}"
fi
local download_url="${DBREPAIR_DOWNLOAD_BASE}/${latest_tag}/DBRepair.sh"
log_message "Downloading ${download_url}"
if curl -fsSL -o "${DBREPAIR_INSTALL_PATH}" "$download_url"; then
chmod +x "${DBREPAIR_INSTALL_PATH}"
log_success "DBRepair ${latest_tag} installed to ${DBREPAIR_INSTALL_PATH}"
return 0
else
log_error "Download failed"
rm -f "${DBREPAIR_INSTALL_PATH}" 2>/dev/null
return 1
fi
}
# Suggest installing DBRepair when errors are found and it's not available
_hint_install_dbrepair() {
if ! find_dbrepair >/dev/null 2>&1; then
echo ""
log_warning "DBRepair is NOT installed. It can fix most database issues automatically."
echo -e " ${CYAN}Install it now: $(basename "$0") install-dbrepair${RESET}"
echo -e " ${CYAN}Then repair: $(basename "$0") repair${RESET}"
fi
}
# List and manage database backup files
list_db_backups() {
local db_dir="$PLEX_DB_DIR"
local -a backup_files=()
local -a backup_paths=()
while IFS= read -r -d '' entry; do
backup_paths+=("$entry")
done < <(sudo find "$db_dir" -maxdepth 1 \( \
-name '*-BACKUP-*' -o \
-name '*-BKUP-*' -o \
-name '*.backup.*' -o \
-name '*recovery*' -o \
-name 'corrupted-*' -o \
-name '*-BLOATED-*' \
\) -print0 2>/dev/null | sort -z)
if [[ ${#backup_paths[@]} -eq 0 ]]; then
log_message "No database backup files found in the Plex database directory"
return 0
fi
echo -e "\n${BOLD}${WHITE} # Type Size Created Name${RESET}"
echo -e " --- ------------- ----------- ------------------------ ------------------------------------"
local idx=0
for entry in "${backup_paths[@]}"; do
idx=$((idx + 1))
local name
name=$(basename "$entry")
local type_label
if [[ "$name" == *-BACKUP-* || "$name" == *-BKUP-* ]]; then
type_label="DBRepair"
elif [[ "$name" == *-BLOATED-* ]]; then
type_label="Bloated"
elif [[ "$name" == *.backup.* ]]; then
type_label="Script"
elif [[ "$name" == corrupted-* ]]; then
type_label="Corrupted"
elif [[ "$name" == *recovery* ]]; then
type_label="Recovery"
else
type_label="Other"
fi
local size
if [[ -d "$entry" ]]; then
size=$(sudo du -sh "$entry" 2>/dev/null | cut -f1)
type_label="${type_label}/dir"
else
size=$(sudo stat --printf='%s' "$entry" 2>/dev/null)
if [[ -n "$size" ]]; then
size=$(numfmt --to=iec-i --suffix=B "$size" 2>/dev/null || echo "${size}B")
else
size="?"
fi
fi
local created
created=$(sudo stat --printf='%y' "$entry" 2>/dev/null | cut -d. -f1)
[[ -z "$created" ]] && created="unknown"
printf " ${WHITE}%-3s${RESET} ${CYAN}%-13s${RESET} ${YELLOW}%-11s${RESET} %-24s %s\n" \
"$idx" "$type_label" "$size" "$created" "$name"
backup_files+=("$entry")
done
echo -e " --- ------------- ----------- ------------------------ ------------------------------------"
echo -e " Total: ${idx} backup file(s)\n"
_BACKUP_LIST=("${backup_files[@]}")
_BACKUP_COUNT=$idx
}
# Interactive backup deletion
delete_db_backups_interactive() {
list_db_backups
if [[ ${_BACKUP_COUNT:-0} -eq 0 ]]; then
return 0
fi
echo -e "${CYAN}Enter backup number(s) to delete (comma-separated), or 'q' to cancel:${RESET} "
read -r selection
if [[ "$selection" == "q" || -z "$selection" ]]; then
log_message "Cancelled"
return 0
fi
IFS=',' read -ra nums <<< "$selection"
local deleted=0
for num in "${nums[@]}"; do
num=$(echo "$num" | tr -d ' ')
if ! [[ "$num" =~ ^[0-9]+$ ]] || (( num < 1 || num > _BACKUP_COUNT )); then
log_error "Invalid selection: $num (skipping)"
continue
fi
local target="${_BACKUP_LIST[$((num-1))]}"
local target_name
target_name=$(basename "$target")
echo -e "${YELLOW}Delete ${target_name}? [y/N]:${RESET} "
read -r confirm
if [[ "${confirm,,}" == "y" ]]; then
if [[ -d "$target" ]]; then
sudo rm -rf "$target"
else
sudo rm -f "$target"
fi
log_success "Deleted: $target_name"
deleted=$((deleted + 1))
else
log_message "Skipped: $target_name"
fi
done
echo ""
log_message "Deleted $deleted backup(s)"
}
# Delete backup by name/pattern
delete_db_backup_by_name() {
local pattern="$1"
local db_dir="$PLEX_DB_DIR"
local found=0
while IFS= read -r -d '' entry; do
local name
name=$(basename "$entry")
echo -e "${YELLOW}Delete ${name}? [y/N]:${RESET} "
read -r confirm
if [[ "${confirm,,}" == "y" ]]; then
if [[ -d "$entry" ]]; then
sudo rm -rf "$entry"
else
sudo rm -f "$entry"
fi
log_success "Deleted: $name"
found=$((found + 1))
fi
done < <(sudo find "$db_dir" -maxdepth 1 -name "*${pattern}*" -print0 2>/dev/null)
if [[ $found -eq 0 ]]; then
log_error "No backups matching '${pattern}' found"
return 1
fi
log_message "Deleted $found file(s)"
}
# Create log directory
mkdir -p "$SCRIPT_DIR/logs"
@@ -210,26 +455,54 @@ check_database_integrity() {
fi
fi
# Run integrity check
# Run structural integrity check
local integrity_result
integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1)
local check_exit_code=$?
if [[ $check_exit_code -ne 0 ]]; then
log_error "Failed to run integrity check on $db_name"
if [[ $check_exit_code -ne 0 && -z "$integrity_result" ]]; then
log_error "Failed to open database: $db_name (exit code $check_exit_code)"
return 1
fi
if echo "$integrity_result" | grep -q "^ok$"; then
log_success "Database integrity check passed: $db_name"
return 0
local struct_ok=true
if [[ "$integrity_result" == "ok" ]]; then
log_success "Structural integrity check passed: $db_name"
else
log_warning "Database integrity issues detected in $db_name:"
echo "$integrity_result" | while IFS= read -r line; do
struct_ok=false
log_warning "Structural integrity issues detected in $db_name:"
echo "$integrity_result" | head -n 10 | while IFS= read -r line; do
log_warning " $line"
done
return 1
fi
# FTS (Full-Text Search) index integrity check
# Standard PRAGMA integrity_check does NOT detect FTS corruption.
local fts_ok=true
local fts_tables
fts_tables=$(sudo "$PLEX_SQLITE" "$db_file" \
"SELECT name FROM sqlite_master WHERE type='table' AND sql LIKE '%fts%';" 2>/dev/null) || true
if [[ -n "$fts_tables" ]]; then
log_message "Checking FTS (Full-Text Search) indexes in $db_name..."
while IFS= read -r table; do
[[ -z "$table" ]] && continue
local fts_result
fts_result=$(sudo "$PLEX_SQLITE" "$db_file" \
"INSERT INTO ${table}(${table}) VALUES('integrity-check');" 2>&1) || true
if [[ -n "$fts_result" ]]; then
fts_ok=false
log_warning "FTS index '${table}' — DAMAGED: $fts_result"
else
log_success "FTS index '${table}' — OK"
fi
done <<< "$fts_tables"
fi
if [[ "$struct_ok" == true && "$fts_ok" == true ]]; then
return 0
fi
return 1
}
# Check all databases
@@ -254,26 +527,35 @@ check_all_databases() {
return 0
else
log_warning "Found integrity issues in $issues database(s)"
_hint_install_dbrepair
return 1
fi
}
# Show help
show_help() {
echo -e "${BOLD}${WHITE}Usage:${RESET} ${CYAN}$(basename "$0")${RESET} ${YELLOW}<command>${RESET} ${DIM}[options]${RESET}"
local script
script=$(basename "$0")
echo -e "${BOLD}${WHITE}Usage:${RESET} ${CYAN}${script}${RESET} ${YELLOW}<command>${RESET} ${DIM}[options]${RESET}"
echo ""
echo -e "${BOLD}${WHITE}Commands:${RESET}"
echo -e " ${GREEN}${BOLD}check${RESET} Read-only database integrity check"
echo -e " ${YELLOW}${BOLD}repair${RESET} Interactive database repair"
echo -e " ${YELLOW}${BOLD}repair --gentle${RESET} Gentle repair methods only"
echo -e " ${RED}${BOLD}repair --force${RESET} Aggressive repair methods"
echo -e " ${RED}${BOLD}nuclear${RESET} Nuclear recovery (replace from backup)"
echo -e " ${CYAN}${BOLD}help${RESET} Show this help message"
printf " ${GREEN}${BOLD}%-18s${RESET} %s\n" "check" "Read-only database integrity check"
printf " ${YELLOW}${BOLD}%-18s${RESET} %s\n" "repair" "Interactive database repair"
printf " ${YELLOW}${BOLD}%-18s${RESET} %s\n" "repair --gentle" "Gentle repair methods only"
printf " ${RED}${BOLD}%-18s${RESET} %s\n" "repair --force" "Aggressive repair methods"
printf " ${RED}${BOLD}%-18s${RESET} %s\n" "nuclear" "Nuclear recovery (replace from backup)"
printf " ${CYAN}${BOLD}%-18s${RESET} %s\n" "backups" "List and manage database backup files"
printf " ${GREEN}${BOLD}%-18s${RESET} %s\n" "install-dbrepair" "Install or update DBRepair tool"
printf " ${CYAN}${BOLD}%-18s${RESET} %s\n" "help" "Show this help message"
echo ""
echo -e "${BOLD}${WHITE}Examples:${RESET}"
echo -e " ${DIM}$(basename "$0") check # Safe integrity check${RESET}"
echo -e " ${DIM}$(basename "$0") repair # Interactive repair${RESET}"
echo -e " ${DIM}$(basename "$0") repair --gentle # Minimal repair only${RESET}"
printf " ${DIM}%-46s # %s${RESET}\n" "${script} check" "Safe integrity check"
printf " ${DIM}%-46s # %s${RESET}\n" "${script} repair" "Interactive repair"
printf " ${DIM}%-46s # %s${RESET}\n" "${script} repair --gentle" "Minimal repair only"
printf " ${DIM}%-46s # %s${RESET}\n" "${script} backups" "List DB backups"
printf " ${DIM}%-46s # %s${RESET}\n" "${script} backups delete" "Interactive backup deletion"
printf " ${DIM}%-46s # %s${RESET}\n" "${script} backups delete --name foo" "Delete by name pattern"
printf " ${DIM}%-46s # %s${RESET}\n" "${script} install-dbrepair" "Install/update DBRepair"
echo ""
echo -e "${BOLD}${YELLOW}⚠️ WARNING:${RESET} Always run ${CYAN}check${RESET} first before attempting repairs!"
echo ""
@@ -315,20 +597,72 @@ main() {
;;
"repair")
echo -e "${RED}${BOLD}⚠️ REPAIR FUNCTIONALITY TEMPORARILY DISABLED${RESET}"
echo -e "${YELLOW}Database repairs are disabled until corruption issues are resolved.${RESET}"
echo -e "${CYAN}Use the individual repair scripts if manual intervention is needed:${RESET}"
echo -e " ${DIM}- plex-database-repair.sh${RESET}"
echo -e " ${DIM}- recover-plex-database.sh${RESET}"
echo -e " ${DIM}- nuclear-plex-recovery.sh${RESET}"
print_header
check_prerequisites
local dbrepair_bin
if dbrepair_bin=$(find_dbrepair); then
log_success "Found DBRepair.sh: $dbrepair_bin"
log_message "Running: stop → auto (check + repair + reindex + FTS rebuild) → start → exit"
if sudo "$dbrepair_bin" stop auto start exit; then
log_success "DBRepair automatic repair completed successfully"
exit 0
else
log_error "DBRepair automatic repair failed"
exit 2
fi
else
echo -e "${RED}${BOLD}⚠️ DBRepair.sh NOT FOUND${RESET}"
echo -e "${YELLOW}You can install it automatically:${RESET}"
echo -e " ${CYAN}$(basename "$0") install-dbrepair${RESET}"
echo -e "${YELLOW}Then re-run: $(basename "$0") repair${RESET}"
exit 2
fi
;;
"nuclear")
echo -e "${RED}${BOLD}⚠️ NUCLEAR RECOVERY TEMPORARILY DISABLED${RESET}"
echo -e "${YELLOW}Nuclear recovery is disabled until corruption issues are resolved.${RESET}"
echo -e "${CYAN}Use nuclear-plex-recovery.sh directly if absolutely necessary.${RESET}"
print_header
check_prerequisites
echo -e "\n${RED}${BOLD}⚠️ WARNING: NUCLEAR RECOVERY ⚠️${RESET}"
echo -e "${RED}This replaces your Plex database with the best available PMS backup!${RESET}"
echo -e "${YELLOW}All changes since the backup was created will be lost.${RESET}\n"
echo -e "${CYAN}Type 'YES' to proceed: ${RESET}"
read -r confirmation
if [[ "$confirmation" != "YES" ]]; then
log_message "Nuclear recovery cancelled by user"
exit 0
fi
local dbrepair_bin
if dbrepair_bin=$(find_dbrepair); then
log_success "Found DBRepair.sh: $dbrepair_bin"
log_message "Running: stop → replace → reindex → start → exit"
if sudo "$dbrepair_bin" stop replace reindex start exit; then
log_success "Nuclear recovery (replace from backup) completed"
exit 0
else
log_error "Nuclear recovery failed"
exit 2
fi
else
# Fallback to dedicated nuclear script
local nuclear_script="${SCRIPT_DIR}/nuclear-plex-recovery.sh"
if [[ -x "$nuclear_script" ]]; then
log_message "DBRepair not found, falling back to nuclear-plex-recovery.sh"
if sudo "$nuclear_script" --auto; then
log_success "Nuclear recovery completed"
exit 0
else
log_error "Nuclear recovery failed"
exit 2
fi
else
log_error "Neither DBRepair.sh nor nuclear-plex-recovery.sh found"
exit 2
fi
fi
;;
"help"|"--help"|"-h")
@@ -336,6 +670,24 @@ main() {
show_help
;;
"install-dbrepair"|"update-dbrepair"|"dbrepair")
print_header
install_or_update_dbrepair
;;
"backups"|"backup-list")
print_header
if [[ $# -ge 2 && "${2,,}" == "delete" ]]; then
if [[ $# -ge 4 && "${3}" == "--name" ]]; then
delete_db_backup_by_name "$4"
else
delete_db_backups_interactive
fi
else
list_db_backups
fi
;;
*)
print_header
log_error "Unknown command: $1"
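
Taken together, the new commands are intended to be run in roughly this order when a check reports problems (invocation shown from the repo's plex/ directory; adjust the path as needed):

```bash
# Read-only health check (safe to run anytime)
./plex-db-manager.sh check

# If issues are reported and DBRepair is missing, install it first
./plex-db-manager.sh install-dbrepair

# Scripted repair: stop -> auto (check + repair + reindex) -> start -> exit
./plex-db-manager.sh repair

# Afterwards, review and prune the backup files DBRepair leaves behind
./plex-db-manager.sh backups
./plex-db-manager.sh backups delete
```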

View File

@@ -57,24 +57,285 @@
set -euo pipefail
# 🎨 Color definitions for sexy output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly PURPLE='\033[0;35m'
readonly CYAN='\033[0;36m'
readonly WHITE='\033[1;37m'
readonly BOLD='\033[1m'
readonly DIM='\033[2m'
readonly RESET='\033[0m'
readonly RED=$'\033[0;31m'
readonly GREEN=$'\033[0;32m'
readonly YELLOW=$'\033[1;33m'
readonly BLUE=$'\033[0;34m'
readonly PURPLE=$'\033[0;35m'
readonly CYAN=$'\033[0;36m'
readonly WHITE=$'\033[1;37m'
readonly BOLD=$'\033[1m'
readonly DIM=$'\033[2m'
readonly RESET=$'\033[0m'
# 🔧 Configuration
readonly PLEX_SERVICE="plexmediaserver"
readonly PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"
readonly PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_NAME
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
readonly SCRIPT_DIR
# DBRepair.sh location — searched in order: script dir, database dir, /usr/local/bin
readonly DBREPAIR_INSTALL_PATH="${SCRIPT_DIR}/DBRepair.sh"
readonly DBREPAIR_GITHUB_API="https://api.github.com/repos/ChuckPa/PlexDBRepair/releases"
readonly DBREPAIR_DOWNLOAD_BASE="https://github.com/ChuckPa/PlexDBRepair/releases/download"
find_dbrepair() {
local candidates=(
"${SCRIPT_DIR}/DBRepair.sh"
"${PLEX_DB_DIR}/DBRepair.sh"
"/usr/local/bin/DBRepair.sh"
)
for path in "${candidates[@]}"; do
if [[ -x "$path" ]]; then
echo "$path"
return 0
fi
done
return 1
}
# Get the latest non-beta release tag from GitHub
_dbrepair_latest_release_tag() {
# Fetch the releases list and take the first (most recent) tag_name
local tag
tag=$(curl -fsSL "${DBREPAIR_GITHUB_API}" 2>/dev/null \
| grep -Eo '"tag_name"\s*:\s*"[^"]+"' \
| head -n 1 \
| sed 's/"tag_name"\s*:\s*"//;s/"//')
if [[ -z "$tag" ]]; then
return 1
fi
echo "$tag"
}
# Get currently installed DBRepair version (from its own version output)
_dbrepair_installed_version() {
local bin="$1"
# DBRepair prints "Version vX.YY.ZZ" near the top when run interactively;
# we can also grep the script file itself for the version string.
grep -oP 'Version\s+v\K[0-9.]+' "$bin" 2>/dev/null | head -n 1
}
# Install or update DBRepair.sh
install_or_update_dbrepair() {
print_status "${INFO}" "Checking DBRepair (ChuckPa/PlexDBRepair)..." "${BLUE}"
# Determine latest non-beta release
local latest_tag
if ! latest_tag=$(_dbrepair_latest_release_tag); then
print_status "${CROSS}" "Failed to query GitHub for the latest release" "${RED}"
print_status "${INFO}" "Check your internet connection or try: https://github.com/ChuckPa/PlexDBRepair/releases" "${YELLOW}"
return 1
fi
print_status "${INFO}" "Latest stable release: ${latest_tag}" "${BLUE}"
# Check if already installed
local dbrepair_bin
if dbrepair_bin=$(find_dbrepair); then
local installed_ver
installed_ver=$(_dbrepair_installed_version "$dbrepair_bin")
local remote_ver
remote_ver=$(echo "$latest_tag" | sed 's/^v//')
if [[ -n "$installed_ver" ]]; then
print_status "${INFO}" "Installed version: v${installed_ver} at ${dbrepair_bin}" "${BLUE}"
if [[ "$installed_ver" == "$remote_ver" ]]; then
print_status "${CHECKMARK}" "DBRepair is already up to date (v${installed_ver})" "${GREEN}"
return 0
else
print_status "${INFO}" "Update available: v${installed_ver} -> ${latest_tag}" "${YELLOW}"
fi
else
print_status "${INFO}" "Installed at ${dbrepair_bin} (version unknown), will update" "${YELLOW}"
fi
else
print_status "${INFO}" "DBRepair not found — installing to ${DBREPAIR_INSTALL_PATH}" "${BLUE}"
fi
# Download
local download_url="${DBREPAIR_DOWNLOAD_BASE}/${latest_tag}/DBRepair.sh"
print_status "${INFO}" "Downloading ${download_url}" "${BLUE}"
if curl -fsSL -o "${DBREPAIR_INSTALL_PATH}" "$download_url"; then
chmod +x "${DBREPAIR_INSTALL_PATH}"
print_status "${CHECKMARK}" "DBRepair ${latest_tag} installed to ${DBREPAIR_INSTALL_PATH}" "${GREEN}"
return 0
else
print_status "${CROSS}" "Download failed" "${RED}"
rm -f "${DBREPAIR_INSTALL_PATH}" 2>/dev/null
return 1
fi
}
# Suggest installing DBRepair when errors are found and it's not available
_hint_install_dbrepair() {
if ! find_dbrepair >/dev/null 2>&1; then
echo ""
print_status "${INFO}" "DBRepair is NOT installed. It can fix most database issues automatically." "${YELLOW}"
echo -e "${DIM}${CYAN} Install it now: ${SCRIPT_NAME} install-dbrepair${RESET}"
echo -e "${DIM}${CYAN} Then repair: ${SCRIPT_NAME} repair${RESET}"
fi
}
# 📦 List and manage database backup files
# Covers: DBRepair backups (-BACKUP-*, -BKUP-*), script backups (*.backup.*),
# corrupted dirs (corrupted-*), recovery files (*recovery*)
list_db_backups() {
local db_dir="$PLEX_DB_DIR"
local -a backup_files=()
local -a backup_paths=()
# Collect all backup-like files and dirs
while IFS= read -r -d '' entry; do
backup_paths+=("$entry")
done < <(sudo find "$db_dir" -maxdepth 1 \( \
-name '*-BACKUP-*' -o \
-name '*-BKUP-*' -o \
-name '*.backup.*' -o \
-name '*recovery*' -o \
-name 'corrupted-*' -o \
-name '*-BLOATED-*' \
\) -print0 2>/dev/null | sort -z)
if [[ ${#backup_paths[@]} -eq 0 ]]; then
print_status "${INFO}" "No database backup files found in the Plex database directory" "${YELLOW}"
return 0
fi
echo -e "\n${BOLD}${WHITE} # Type Size Created Name${RESET}"
echo -e "${DIM}${CYAN} --- ------------- ----------- ------------------------ ------------------------------------${RESET}"
local idx=0
for entry in "${backup_paths[@]}"; do
idx=$((idx + 1))
local name
name=$(basename "$entry")
# Determine type label
local type_label
if [[ "$name" == *-BACKUP-* || "$name" == *-BKUP-* ]]; then
type_label="DBRepair"
elif [[ "$name" == *-BLOATED-* ]]; then
type_label="Bloated"
elif [[ "$name" == *.backup.* ]]; then
type_label="Script"
elif [[ "$name" == corrupted-* ]]; then
type_label="Corrupted"
elif [[ "$name" == *recovery* ]]; then
type_label="Recovery"
else
type_label="Other"
fi
# Size (human-readable)
local size
if [[ -d "$entry" ]]; then
size=$(sudo du -sh "$entry" 2>/dev/null | cut -f1)
type_label="${type_label}/dir"
else
size=$(sudo stat --printf='%s' "$entry" 2>/dev/null)
if [[ -n "$size" ]]; then
size=$(numfmt --to=iec-i --suffix=B "$size" 2>/dev/null || echo "${size}B")
else
size="?"
fi
fi
# Created date
local created
created=$(sudo stat --printf='%y' "$entry" 2>/dev/null | cut -d. -f1)
[[ -z "$created" ]] && created="unknown"
printf " ${WHITE}%-3s${RESET} ${CYAN}%-13s${RESET} ${YELLOW}%-11s${RESET} ${DIM}%-24s${RESET} %s\n" \
"$idx" "$type_label" "$size" "$created" "$name"
backup_files+=("$entry")
done
echo -e "${DIM}${CYAN} --- ------------- ----------- ------------------------ ------------------------------------${RESET}"
echo -e " ${DIM}Total: ${idx} backup file(s)${RESET}\n"
# Store for use by delete function
_BACKUP_LIST=("${backup_files[@]}")
_BACKUP_COUNT=$idx
}
# Interactive backup deletion
delete_db_backups_interactive() {
list_db_backups
if [[ ${_BACKUP_COUNT:-0} -eq 0 ]]; then
return 0
fi
echo -e "${CYAN}Enter backup number(s) to delete (comma-separated), or 'q' to cancel:${RESET} "
read -r selection
if [[ "$selection" == "q" || -z "$selection" ]]; then
print_status "${INFO}" "Cancelled" "${YELLOW}"
return 0
fi
# Parse comma-separated numbers
IFS=',' read -ra nums <<< "$selection"
local deleted=0
for num in "${nums[@]}"; do
num=$(echo "$num" | tr -d ' ')
if ! [[ "$num" =~ ^[0-9]+$ ]] || (( num < 1 || num > _BACKUP_COUNT )); then
print_status "${CROSS}" "Invalid selection: $num (skipping)" "${RED}"
continue
fi
local target="${_BACKUP_LIST[$((num-1))]}"
local target_name
target_name=$(basename "$target")
echo -e "${YELLOW}Delete ${target_name}? [y/N]:${RESET} "
read -r confirm
if [[ "${confirm,,}" == "y" ]]; then
if [[ -d "$target" ]]; then
sudo rm -rf "$target"
else
sudo rm -f "$target"
fi
print_status "${CHECKMARK}" "Deleted: $target_name" "${GREEN}"
deleted=$((deleted + 1))
else
print_status "${INFO}" "Skipped: $target_name" "${YELLOW}"
fi
done
echo ""
print_status "${INFO}" "Deleted $deleted backup(s)" "${BLUE}"
}
# Delete backup by name/pattern (for scripted use)
delete_db_backup_by_name() {
local pattern="$1"
local db_dir="$PLEX_DB_DIR"
local found=0
while IFS= read -r -d '' entry; do
local name
name=$(basename "$entry")
echo -e "${YELLOW}Delete ${name}? [y/N]:${RESET} "
read -r confirm
if [[ "${confirm,,}" == "y" ]]; then
if [[ -d "$entry" ]]; then
sudo rm -rf "$entry"
else
sudo rm -f "$entry"
fi
print_status "${CHECKMARK}" "Deleted: $name" "${GREEN}"
found=$((found + 1))
fi
done < <(sudo find "$db_dir" -maxdepth 1 -name "*${pattern}*" -print0 2>/dev/null)
if [[ $found -eq 0 ]]; then
print_status "${CROSS}" "No backups matching '${pattern}' found" "${RED}"
return 1
fi
print_status "${INFO}" "Deleted $found file(s)" "${BLUE}"
}
# 🎭 ASCII symbols for compatible output
readonly CHECKMARK="[✓]"
readonly CROSS="[✗]"
@@ -114,218 +375,229 @@ show_loading() {
printf "\r%s%s %s %s%s\n" "${CYAN}" "${HOURGLASS}" "${message}" "${CHECKMARK}" "${RESET}"
}
# 🔧 Enhanced function to repair database issues
# 🔧 Repair database using ChuckPa/DBRepair when available, else manual steps
# DBRepair: https://github.com/ChuckPa/PlexDBRepair
repair_database() {
print_status "${INFO}" "Attempting to repair Plex database..." "${BLUE}"
local db_dir="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
local main_db="$db_dir/com.plexapp.plugins.library.db"
local backup_db="$db_dir/com.plexapp.plugins.library.db.backup.$(date +%Y%m%d_%H%M%S)"
local corrupted_dir="$db_dir/corrupted-$(date +%Y%m%d_%H%M%S)"
local main_db="$PLEX_DB_DIR/com.plexapp.plugins.library.db"
if [[ ! -f "$main_db" ]]; then
print_status "${CROSS}" "Main database not found at: $main_db" "${RED}"
return 1
fi
# ---------- Try DBRepair.sh (preferred) ----------
local dbrepair_bin
if dbrepair_bin=$(find_dbrepair); then
print_status "${CHECKMARK}" "Found DBRepair.sh: $dbrepair_bin" "${GREEN}"
print_status "${INFO}" "Running: stop → auto (check + repair + reindex + FTS rebuild) → start → exit" "${BLUE}"
# DBRepair supports scripted mode — pass commands directly
if sudo "$dbrepair_bin" stop auto start exit; then
print_status "${CHECKMARK}" "DBRepair automatic repair completed successfully!" "${GREEN}"
return 0
else
print_status "${CROSS}" "DBRepair automatic repair failed (exit code $?)" "${RED}"
print_status "${INFO}" "Falling back to manual repair steps..." "${YELLOW}"
fi
else
print_status "${INFO}" "DBRepair.sh not found — using manual repair" "${YELLOW}"
echo -e "${DIM}${CYAN} For better repairs, install DBRepair:${RESET}"
echo -e "${DIM}${CYAN} wget -O ${SCRIPT_DIR}/DBRepair.sh https://github.com/ChuckPa/PlexDBRepair/releases/latest/download/DBRepair.sh${RESET}"
echo -e "${DIM}${CYAN} chmod +x ${SCRIPT_DIR}/DBRepair.sh${RESET}"
fi
# ---------- Manual fallback (same approach DBRepair uses internally) ----------
# Prefer Plex's bundled SQLite for ICU compatibility
local sqlite_bin="sqlite3"
if [[ -x "$PLEX_SQLITE" ]]; then
sqlite_bin="$PLEX_SQLITE"
fi
# Stop Plex service first
print_status "${INFO}" "Stopping Plex service..." "${BLUE}"
sudo systemctl stop "$PLEX_SERVICE" 2>/dev/null || true
sleep 3
# Check if critical tables exist
print_status "${INFO}" "Checking database structure..." "${BLUE}"
local has_metadata_table=false
if sudo -u plex sqlite3 "$main_db" "SELECT name FROM sqlite_master WHERE type='table' AND name='metadata_items';" 2>/dev/null | grep -q metadata_items; then
has_metadata_table=true
# Step 1: WAL checkpoint
if [[ -f "${main_db}-wal" ]]; then
print_status "${INFO}" "Checkpointing WAL journal..." "${BLUE}"
sudo -u plex "$sqlite_bin" "$main_db" "PRAGMA wal_checkpoint(TRUNCATE);" 2>/dev/null || true
fi
if [[ "$has_metadata_table" == "false" ]]; then
print_status "${CROSS}" "Critical table 'metadata_items' is missing! Database is severely corrupted." "${RED}"
print_status "${INFO}" "Attempting recovery from available backups..." "${YELLOW}"
# Step 2: Export → import (the core of DBRepair's repair/optimize)
local ts
ts=$(date +%Y%m%d_%H%M%S)
local sql_dump="/tmp/plex_dump_${ts}.sql"
local new_db="/tmp/plex_repaired_${ts}.db"
local backup_db="$PLEX_DB_DIR/com.plexapp.plugins.library.db-BACKUP-${ts}"
# Find the best recovery candidate
local recovery_db=""
local recovery_candidates=(
"$db_dir/com.plexapp.plugins.library.db.recovery-"*
"$db_dir/com.plexapp.plugins.library.db.20"*
)
for candidate in "${recovery_candidates[@]}"; do
if [[ -f "$candidate" && "$candidate" != *"tmp"* && "$candidate" != *"empty"* ]]; then
# Test if this candidate has the metadata_items table
if sudo -u plex sqlite3 "$candidate" "SELECT name FROM sqlite_master WHERE type='table' AND name='metadata_items';" 2>/dev/null | grep -q metadata_items; then
recovery_db="$candidate"
break
print_status "${INFO}" "Exporting database to SQL..." "${BLUE}"
if sudo -u plex "$sqlite_bin" "$main_db" ".dump" | sudo -u plex tee "$sql_dump" >/dev/null 2>&1; then
print_status "${CHECKMARK}" "SQL export completed ($(du -sh "$sql_dump" | cut -f1))" "${GREEN}"
else
print_status "${CROSS}" "SQL export failed — database may be too damaged for this method" "${RED}"
rm -f "$sql_dump" 2>/dev/null
print_status "${INFO}" "Install DBRepair.sh for more robust repair capabilities" "${YELLOW}"
return 1
fi
print_status "${INFO}" "Importing into fresh database..." "${BLUE}"
if sudo cat "$sql_dump" | sudo -u plex "$sqlite_bin" "$new_db" 2>/dev/null; then
print_status "${CHECKMARK}" "Import into fresh database succeeded" "${GREEN}"
else
print_status "${CROSS}" "Import failed" "${RED}"
rm -f "$sql_dump" "$new_db" 2>/dev/null
return 1
fi
done
rm -f "$sql_dump" 2>/dev/null
if [[ -n "$recovery_db" ]]; then
print_status "${CHECKMARK}" "Found recovery database: $(basename "$recovery_db")" "${GREEN}"
# Step 3: Verify repaired database (structural + FTS)
print_status "${INFO}" "Verifying repaired database..." "${BLUE}"
local verify_out
verify_out=$(sudo -u plex "$sqlite_bin" "$new_db" "PRAGMA integrity_check;" 2>&1)
if [[ "$verify_out" != "ok" ]]; then
print_status "${CROSS}" "Repaired database failed integrity check" "${RED}"
rm -f "$new_db" 2>/dev/null
return 1
fi
# Move corrupted database to backup location
print_status "${INFO}" "Moving corrupted database to backup location..." "${BLUE}"
sudo mkdir -p "$corrupted_dir"
sudo mv "$main_db" "$corrupted_dir/"
sudo mv "$main_db-shm" "$corrupted_dir/" 2>/dev/null || true
sudo mv "$main_db-wal" "$corrupted_dir/" 2>/dev/null || true
# Step 4: Reindex (rebuilds all indexes INCLUDING FTS)
print_status "${INFO}" "Rebuilding indexes (including FTS)..." "${BLUE}"
sudo -u plex "$sqlite_bin" "$new_db" "REINDEX;" 2>/dev/null || true
# Copy recovery database as new main database
print_status "${INFO}" "Restoring database from recovery file..." "${BLUE}"
if sudo cp "$recovery_db" "$main_db"; then
# Rebuild FTS index content explicitly
local fts_tables
fts_tables=$(sudo -u plex "$sqlite_bin" "$new_db" \
"SELECT name FROM sqlite_master WHERE type='table' AND sql LIKE '%fts%';" 2>/dev/null) || true
if [[ -n "$fts_tables" ]]; then
while IFS= read -r table; do
[[ -z "$table" ]] && continue
print_status "${INFO}" "Rebuilding FTS index: $table" "${BLUE}"
sudo -u plex "$sqlite_bin" "$new_db" \
"INSERT INTO ${table}(${table}) VALUES('rebuild');" 2>/dev/null || true
done <<< "$fts_tables"
fi
# Step 5: Swap databases
print_status "${INFO}" "Backing up old database and activating repaired copy..." "${BLUE}"
sudo cp "$main_db" "$backup_db"
sudo mv "$new_db" "$main_db"
sudo chown plex:plex "$main_db"
sudo chmod 644 "$main_db"
print_status "${CHECKMARK}" "Database restored successfully!" "${GREEN}"
# Remove stale journals for the old database
sudo rm -f "${main_db}-shm" "${main_db}-wal" 2>/dev/null || true
# Verify the restored database
print_status "${INFO}" "Verifying restored database..." "${BLUE}"
local integrity_result
integrity_result=$(sudo -u plex sqlite3 "$main_db" "PRAGMA integrity_check;" 2>&1)
if echo "$integrity_result" | grep -q "ok"; then
print_status "${CHECKMARK}" "Restored database integrity verified!" "${GREEN}"
print_status "${CHECKMARK}" "Database repaired and activated. Backup at: $(basename "$backup_db")" "${GREEN}"
return 0
elif echo "$integrity_result" | grep -q "no such collation sequence: icu"; then
print_status "${CROSS}" "ICU collation sequence issue detected!" "${YELLOW}"
print_status "${INFO}" "Attempting ICU-aware recovery..." "${BLUE}"
}
# Try ICU-aware recovery script
local icu_script="${SCRIPT_DIR}/icu-aware-recovery.sh"
if [[ -f "$icu_script" ]]; then
if "$icu_script" --auto; then
print_status "${CHECKMARK}" "ICU-aware recovery completed!" "${GREEN}"
# 🔍 Function to check FTS (Full-Text Search) index integrity
# Standard PRAGMA integrity_check does NOT detect FTS corruption.
# This is the exact class of damage shown in the user's screenshot.
check_fts_integrity() {
local db_file="$1"
local sqlite_bin="${2:-sqlite3}" # Use Plex SQLite if available
local issues=0
# Discover FTS tables dynamically from sqlite_master
local fts_tables
fts_tables=$(sudo -u plex "$sqlite_bin" "$db_file" \
"SELECT name FROM sqlite_master WHERE type='table' AND sql LIKE '%fts%';" 2>/dev/null) || return 0
if [[ -z "$fts_tables" ]]; then
return 0 # No FTS tables — nothing to check
fi
print_status "${INFO}" "Checking FTS (Full-Text Search) indexes..." "${BLUE}"
while IFS= read -r table; do
[[ -z "$table" ]] && continue
local result
result=$(sudo -u plex "$sqlite_bin" "$db_file" \
"INSERT INTO ${table}(${table}) VALUES('integrity-check');" 2>&1) || true
if [[ -n "$result" ]]; then
print_status "${CROSS}" "FTS index '${table}' — DAMAGED" "${RED}"
echo -e "${DIM}${YELLOW} $result${RESET}"
issues=$((issues + 1))
else
print_status "${CHECKMARK}" "FTS index '${table}' — OK" "${GREEN}"
fi
done <<< "$fts_tables"
if (( issues > 0 )); then
print_status "${CROSS}" "FTS integrity check complete. $issues index(es) damaged." "${RED}"
print_status "${INFO}" "Run: ${SCRIPT_NAME} repair (uses DBRepair reindex to rebuild FTS)" "${YELLOW}"
return 1
fi
return 0
else
print_status "${CROSS}" "ICU-aware recovery failed!" "${RED}"
fi
else
print_status "${INFO}" "ICU recovery script not found, trying manual fix..." "${YELLOW}"
# Try to recreate database without ICU dependencies
local temp_db="/tmp/plex_temp_$(date +%Y%m%d_%H%M%S).db"
print_status "${INFO}" "Attempting to dump and recreate database..." "${BLUE}"
if sudo -u plex sqlite3 "$recovery_db" ".dump" | grep -v "COLLATE icu_" | sudo -u plex sqlite3 "$temp_db"; then
print_status "${INFO}" "Database dump successful, replacing main database..." "${BLUE}"
sudo mv "$temp_db" "$main_db"
sudo chown plex:plex "$main_db"
sudo chmod 644 "$main_db"
# Verify the recreated database
if sudo -u plex sqlite3 "$main_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
print_status "${CHECKMARK}" "Database recreated successfully without ICU!" "${GREEN}"
return 0
fi
fi
# Clean up temp file if it exists
sudo rm -f "$temp_db" 2>/dev/null || true
fi
print_status "${CROSS}" "Failed to resolve ICU collation issues!" "${RED}"
return 1
else
print_status "${CROSS}" "Restored database failed integrity check!" "${RED}"
print_status "${INFO}" "Integrity check result:" "${YELLOW}"
echo -e "${DIM}${YELLOW} $integrity_result${RESET}"
return 1
fi
else
print_status "${CROSS}" "Failed to restore database!" "${RED}"
return 1
fi
else
print_status "${CROSS}" "No valid recovery databases found!" "${RED}"
print_status "${INFO}" "Available options:" "${YELLOW}"
echo -e "${DIM}${YELLOW} 1. Check manual backups in /mnt/share/media/backups/plex/${RESET}"
echo -e "${DIM}${YELLOW} 2. Let Plex rebuild database (will lose all metadata)${RESET}"
echo -e "${DIM}${YELLOW} 3. Run: sudo rm '$main_db' && sudo systemctl start plexmediaserver${RESET}"
return 1
fi
fi
# Create backup of current database
print_status "${INFO}" "Creating backup of current database..." "${BLUE}"
if ! sudo cp "$main_db" "$backup_db"; then
print_status "${CROSS}" "Failed to create database backup!" "${RED}"
return 1
fi
print_status "${CHECKMARK}" "Database backed up to: $backup_db" "${GREEN}"
# Try to vacuum the database
print_status "${INFO}" "Running VACUUM on database..." "${BLUE}"
if sudo -u plex sqlite3 "$main_db" "VACUUM;" 2>/dev/null; then
print_status "${CHECKMARK}" "Database VACUUM completed successfully" "${GREEN}"
# Test integrity again
if sudo -u plex sqlite3 "$main_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
print_status "${CHECKMARK}" "Database integrity restored!" "${GREEN}"
print_status "${INFO}" "You can now try starting Plex again" "${BLUE}"
return 0
else
print_status "${CROSS}" "Database still corrupted after VACUUM" "${RED}"
fi
else
print_status "${CROSS}" "VACUUM operation failed" "${RED}"
fi
# Try reindex as last resort
print_status "${INFO}" "Attempting REINDEX operation..." "${BLUE}"
if sudo -u plex sqlite3 "$main_db" "REINDEX;" 2>/dev/null; then
print_status "${CHECKMARK}" "Database REINDEX completed" "${GREEN}"
# Test integrity one more time
if sudo -u plex sqlite3 "$main_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
print_status "${CHECKMARK}" "Database integrity restored after REINDEX!" "${GREEN}"
return 0
fi
fi
print_status "${CROSS}" "All repair attempts failed" "${RED}"
print_status "${INFO}" "Manual intervention required. Consider:" "${YELLOW}"
echo -e "${DIM}${YELLOW} 1. Restore from external backup using restore-plex.sh${RESET}"
echo -e "${DIM}${YELLOW} 2. Use nuclear recovery: ./nuclear-plex-recovery.sh${RESET}"
echo -e "${DIM}${YELLOW} 3. Check corrupted database moved to: $corrupted_dir${RESET}"
return 1
}
# 🔍 Function to check database integrity
check_database_integrity() {
print_status "${INFO}" "Checking database integrity..." "${BLUE}"
local db_dir="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
local main_db="$db_dir/com.plexapp.plugins.library.db"
local repair_script="${SCRIPT_DIR}/plex-database-repair.sh"
local main_db="$PLEX_DB_DIR/com.plexapp.plugins.library.db"
if [[ ! -f "$main_db" ]]; then
print_status "${CROSS}" "Main database not found at: $main_db" "${RED}"
return 1
fi
# Use shared repair script for integrity checking if available
if [[ -f "$repair_script" ]]; then
if "$repair_script" check "$main_db" >/dev/null 2>&1; then
print_status "${CHECKMARK}" "Database integrity check passed" "${GREEN}"
return 0
else
print_status "${CROSS}" "Database integrity check failed!" "${RED}"
print_status "${INFO}" "Consider running database repair: plex repair" "${YELLOW}"
return 1
fi
else
# Fallback to basic sqlite3 check
if ! sudo -u plex sqlite3 "$main_db" "PRAGMA integrity_check;" >/dev/null 2>&1; then
print_status "${CROSS}" "Database integrity check failed!" "${RED}"
print_status "${INFO}" "Consider running database repair or restore from backup" "${YELLOW}"
return 1
# Prefer Plex's bundled SQLite for ICU compatibility
local sqlite_bin="sqlite3"
if [[ -x "$PLEX_SQLITE" ]]; then
sqlite_bin="$PLEX_SQLITE"
fi
print_status "${CHECKMARK}" "Database integrity check passed" "${GREEN}"
# Clean up stale WAL/SHM journals left by non-clean shutdowns.
if [[ -f "${main_db}-wal" ]]; then
print_status "${INFO}" "WAL journal found — attempting checkpoint before integrity check..." "${BLUE}"
if ! sudo -u plex "$sqlite_bin" "$main_db" "PRAGMA wal_checkpoint(TRUNCATE);" 2>/dev/null; then
print_status "${INFO}" "WAL checkpoint failed (non-critical, continuing check)" "${YELLOW}"
fi
fi
# --- Standard structural integrity check ---
local integrity_output
local sqlite_exit_code=0
integrity_output=$(sudo -u plex "$sqlite_bin" "$main_db" "PRAGMA integrity_check;" 2>&1) || sqlite_exit_code=$?
local struct_ok=true
if [[ "$integrity_output" == "ok" ]]; then
print_status "${CHECKMARK}" "Database structural integrity check passed" "${GREEN}"
elif [[ $sqlite_exit_code -ne 0 && -z "$integrity_output" ]]; then
print_status "${CROSS}" "sqlite3 failed to open the database (exit code $sqlite_exit_code)" "${RED}"
print_status "${INFO}" "Check file permissions and ensure Plex is fully stopped" "${YELLOW}"
return 1
else
struct_ok=false
print_status "${CROSS}" "Database structural integrity check reported issues:" "${RED}"
echo "$integrity_output" | head -n 5 | while IFS= read -r line; do
echo -e "${DIM}${YELLOW} $line${RESET}"
done
local total_lines
total_lines=$(echo "$integrity_output" | wc -l)
if (( total_lines > 5 )); then
echo -e "${DIM}${YELLOW} ... and $((total_lines - 5)) more issue(s)${RESET}"
fi
fi
# --- FTS index integrity (the most common unreported corruption) ---
local fts_ok=true
if ! check_fts_integrity "$main_db" "$sqlite_bin"; then
fts_ok=false
fi
if [[ "$struct_ok" == true && "$fts_ok" == true ]]; then
return 0
fi
print_status "${INFO}" "Consider running database repair: ${SCRIPT_NAME} repair" "${YELLOW}"
_hint_install_dbrepair
return 1
}
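
The WAL handling added above matters because an unclean shutdown can leave `-wal`/`-shm` journal files that make a read-only check look worse than it is. A checkpoint folds the journal back into the main database file; a minimal manual sketch using the same paths the script assumes:

```bash
#!/bin/bash
PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"
DB="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db"

# Only meaningful while Plex is stopped; TRUNCATE writes all WAL frames back
# into the database and resets the -wal file to zero length.
sudo systemctl stop plexmediaserver
sudo -u plex "$PLEX_SQLITE" "$DB" "PRAGMA wal_checkpoint(TRUNCATE);"
ls -l "${DB}-wal" 2>/dev/null   # should now be 0 bytes or gone
```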
# 🚀 Enhanced start function
@@ -341,11 +613,12 @@ start_plex() {
# Reset any failed state first
sudo systemctl reset-failed "$PLEX_SERVICE" 2>/dev/null || true
# Check database integrity before starting
# Check database integrity before starting (warn only — don't block startup).
# Many "failures" are benign WAL journal leftovers that Plex resolves on its own.
if ! check_database_integrity; then
print_status "${CROSS}" "Database integrity issues detected. Service may fail to start." "${RED}"
echo -e "${DIM}${YELLOW} Try: sudo systemctl stop plexmediaserver && sudo -u plex sqlite3 /var/lib/plexmediaserver/Library/Application\ Support/Plex\ Media\ Server/Plug-in\ Support/Databases/com.plexapp.plugins.library.db 'VACUUM;'${RESET}"
return 1
print_status "${INFO}" "Database integrity issues detected — starting Plex anyway (it may self-repair)." "${YELLOW}"
echo -e "${DIM}${YELLOW} If Plex fails to start, run: ${SCRIPT_NAME} repair${RESET}"
_hint_install_dbrepair
fi
print_status "${INFO}" "Attempting to start service..." "${BLUE}"
@@ -502,25 +775,57 @@ show_help() {
echo -e "${BOLD}${WHITE}Usage:${RESET} ${CYAN}${SCRIPT_NAME}${RESET} ${YELLOW}<command>${RESET}"
echo ""
echo -e "${BOLD}${WHITE}Available Commands:${RESET}"
echo -e " ${GREEN}${BOLD}start${RESET} ${ROCKET} Start Plex Media Server"
echo -e " ${YELLOW}${BOLD}stop${RESET} ${STOP_SIGN} Stop Plex Media Server"
echo -e " ${BLUE}${BOLD}restart${RESET} ${RECYCLE} Restart Plex Media Server"
echo -e " ${CYAN}${BOLD}status${RESET} ${INFO} Show detailed service status"
echo -e " ${PURPLE}${BOLD}scan${RESET} ${SPARKLES} Library scanner operations"
echo -e " ${RED}${BOLD}repair${RESET} [!] Repair database corruption issues"
echo -e " ${RED}${BOLD}nuclear${RESET} [!!] Nuclear database recovery (last resort)"
echo -e " ${PURPLE}${BOLD}help${RESET} [*] Show this help message"
printf " ${GREEN}${BOLD}%-18s${RESET} %s %s\n" "start" "${ROCKET}" "Start Plex Media Server"
printf " ${YELLOW}${BOLD}%-18s${RESET} %s %s\n" "stop" "${STOP_SIGN}" "Stop Plex Media Server"
printf " ${BLUE}${BOLD}%-18s${RESET} %s %s\n" "restart" "${RECYCLE}" "Restart Plex Media Server"
printf " ${CYAN}${BOLD}%-18s${RESET} %s %s\n" "status" "${INFO}" "Show detailed service status"
printf " ${PURPLE}${BOLD}%-18s${RESET} %s %s\n" "scan" "${SPARKLES}" "Library scanner operations"
printf " ${RED}${BOLD}%-18s${RESET} %s %s\n" "repair" "[!]" "Repair database corruption issues"
printf " ${RED}${BOLD}%-18s${RESET} %s %s\n" "nuclear" "[!!]" "Nuclear database recovery (last resort)"
printf " ${CYAN}${BOLD}%-18s${RESET} %s %s\n" "backups" "[#]" "List and manage database backup files"
printf " ${GREEN}${BOLD}%-18s${RESET} %s %s\n" "install-dbrepair" "[+]" "Install or update DBRepair tool"
printf " ${WHITE}${BOLD}%-18s${RESET} %s %s\n" "tui" "[>]" "Launch interactive TUI dashboard"
printf " ${PURPLE}${BOLD}%-18s${RESET} %s %s\n" "help" "${HOURGLASS}" "Show this help message"
echo ""
echo -e "${DIM}${WHITE}Examples:${RESET}"
echo -e " ${DIM}${SCRIPT_NAME} start # Start the Plex service${RESET}"
echo -e " ${DIM}${SCRIPT_NAME} status # Show current status${RESET}"
echo -e " ${DIM}${SCRIPT_NAME} scan # Launch library scanner interface${RESET}"
echo -e " ${DIM}${SCRIPT_NAME} repair # Fix database issues${RESET}"
echo -e " ${DIM}${SCRIPT_NAME} nuclear # Complete database replacement${RESET}"
printf " ${DIM}%-40s # %s${RESET}\n" "${SCRIPT_NAME} start" "Start the Plex service"
printf " ${DIM}%-40s # %s${RESET}\n" "${SCRIPT_NAME} status" "Show current status"
printf " ${DIM}%-40s # %s${RESET}\n" "${SCRIPT_NAME} scan" "Launch library scanner interface"
printf " ${DIM}%-40s # %s${RESET}\n" "${SCRIPT_NAME} repair" "Fix database issues"
printf " ${DIM}%-40s # %s${RESET}\n" "${SCRIPT_NAME} nuclear" "Complete database replacement"
printf " ${DIM}%-40s # %s${RESET}\n" "${SCRIPT_NAME} backups" "List and manage DB backups"
printf " ${DIM}%-40s # %s${RESET}\n" "${SCRIPT_NAME} backups delete" "Interactive backup deletion"
printf " ${DIM}%-40s # %s${RESET}\n" "${SCRIPT_NAME} backups delete --name foo" "Delete by name pattern"
printf " ${DIM}%-40s # %s${RESET}\n" "${SCRIPT_NAME} install-dbrepair" "Install/update DBRepair"
printf " ${DIM}%-40s # %s${RESET}\n" "${SCRIPT_NAME} tui" "Launch full TUI dashboard"
echo ""
}
# 📚 Function to launch library scanner
# Function to launch the Python TUI dashboard
launch_tui() {
local venv_dir="${SCRIPT_DIR}/.venv"
local tui_app="${SCRIPT_DIR}/tui/app.py"
if [[ ! -f "$tui_app" ]]; then
print_status "${CROSS}" "TUI application not found: $tui_app" "${RED}"
return 1
fi
# Create venv and install dependencies if needed
if [[ ! -d "$venv_dir" ]]; then
print_status "${INFO}" "Setting up Python environment (first run)..." "${BLUE}"
if ! python3 -m venv "$venv_dir" 2>/dev/null; then
print_status "${CROSS}" "Failed to create Python venv. Install python3-venv." "${RED}"
return 1
fi
"${venv_dir}/bin/pip" install --quiet textual
print_status "${CHECKMARK}" "Python environment ready" "${GREEN}"
fi
exec "${venv_dir}/bin/python3" "$tui_app"
}
# 📚 Function to launch library scanner
launch_scanner() {
print_status "${SPARKLES}" "Launching Plex Library Scanner..." "${PURPLE}"
@@ -684,6 +989,23 @@ main() {
"nuclear"|"nuke")
nuclear_recovery
;;
"install-dbrepair"|"update-dbrepair"|"dbrepair")
install_or_update_dbrepair
;;
"tui"|"dashboard"|"ui")
launch_tui
;;
"backups"|"backup-list")
if [[ $# -ge 2 && "${2,,}" == "delete" ]]; then
if [[ $# -ge 4 && "${3}" == "--name" ]]; then
delete_db_backup_by_name "$4"
else
delete_db_backups_interactive
fi
else
list_db_backups
fi
;;
"help"|"--help"|"-h")
show_help
;;

View File

@@ -44,19 +44,21 @@
set -euo pipefail
# 🎨 Color definitions for styled output
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly PURPLE='\033[0;35m'
readonly CYAN='\033[0;36m'
readonly WHITE='\033[1;37m'
readonly BOLD='\033[1m'
readonly DIM='\033[2m'
readonly RESET='\033[0m'
readonly RED=$'\033[0;31m'
readonly GREEN=$'\033[0;32m'
readonly YELLOW=$'\033[1;33m'
readonly BLUE=$'\033[0;34m'
readonly PURPLE=$'\033[0;35m'
readonly CYAN=$'\033[0;36m'
readonly WHITE=$'\033[1;37m'
readonly BOLD=$'\033[1m'
readonly DIM=$'\033[2m'
readonly RESET=$'\033[0m'
# 🔧 Configuration
readonly PLEX_SERVICE="plexmediaserver"
readonly PLEX_PREFS="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Preferences.xml"
readonly PLEX_API_BASE="http://localhost:32400"
readonly SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
readonly LOG_DIR="${SCRIPT_DIR}/../logs"
@@ -144,6 +146,126 @@ find_scanner() {
return 1
}
# 🚀 Run Plex Media Scanner as the plex user with correct environment
# Usage: run_scanner [args...] — runs and returns exit code
run_scanner() {
sudo -u plex \
env LD_LIBRARY_PATH=/usr/lib/plexmediaserver \
PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR="/var/lib/plexmediaserver/Library/Application Support" \
"$SCANNER_PATH" "$@"
}
# 🔑 Get Plex authentication token from Preferences.xml
get_plex_token() {
local token
token=$(sudo grep -oP 'PlexOnlineToken="\K[^"]+' "$PLEX_PREFS" 2>/dev/null)
if [[ -z "$token" ]]; then
log_verbose "Could not read Plex token from Preferences.xml"
return 1
fi
echo "$token"
}
# 📡 Trigger a library scan via the Plex API (replaces deprecated --scan)
# Usage: api_scan_section <section_id>
api_scan_section() {
local section_id="$1"
local token
if ! token=$(get_plex_token); then
log_verbose "Cannot scan: no Plex token available"
return 1
fi
local http_code
http_code=$(curl -s -o /dev/null -w "%{http_code}" \
-X GET "${PLEX_API_BASE}/library/sections/${section_id}/refresh?X-Plex-Token=${token}")
if [[ "$http_code" =~ ^2 ]]; then
return 0
else
log_verbose "API scan failed for section $section_id (HTTP $http_code)"
return 1
fi
}
# 📡 Trigger a metadata refresh via the Plex API (replaces deprecated --refresh)
# Usage: api_refresh_section <section_id> [force]
api_refresh_section() {
local section_id="$1"
local force="${2:-false}"
local token
if ! token=$(get_plex_token); then
log_verbose "Cannot refresh: no Plex token available"
return 1
fi
local url="${PLEX_API_BASE}/library/sections/${section_id}/refresh?X-Plex-Token=${token}"
if [[ "$force" == "true" ]]; then
url+="&force=1"
fi
local http_code
http_code=$(curl -s -o /dev/null -w "%{http_code}" -X GET "$url")
if [[ "$http_code" =~ ^2 ]]; then
return 0
else
log_verbose "API refresh failed for section $section_id (HTTP $http_code)"
return 1
fi
}
# 📡 Trigger media analysis via the Plex API (replaces deprecated --analyze)
# Usage: api_analyze_section <section_id>
api_analyze_section() {
local section_id="$1"
local token
if ! token=$(get_plex_token); then
log_verbose "Cannot analyze: no Plex token available"
return 1
fi
local http_code
http_code=$(curl -s -o /dev/null -w "%{http_code}" \
-X PUT "${PLEX_API_BASE}/library/sections/${section_id}/analyze?X-Plex-Token=${token}")
if [[ "$http_code" =~ ^2 ]]; then
return 0
else
log_verbose "API analyze failed for section $section_id (HTTP $http_code)"
return 1
fi
}
# 📡 List library sections via the Plex API
# Output format: "key|title|type" per line (e.g. "1|Movies|movie")
api_list_sections() {
local token
if ! token=$(get_plex_token); then
return 1
fi
local xml
if ! xml=$(curl -fsS "${PLEX_API_BASE}/library/sections?X-Plex-Token=${token}" 2>/dev/null); then
log_verbose "Plex API request failed"
return 1
fi
# Parse XML: extract key, title, and type from <Directory> elements
echo "$xml" | grep -oP '<Directory[^>]*>' | while IFS= read -r tag; do
local key title type
key=$(echo "$tag" | grep -oP 'key="\K[^"]+')
title=$(echo "$tag" | grep -oP 'title="\K[^"]+')
type=$(echo "$tag" | grep -oP 'type="\K[^"]+')
echo "${key}|${title}|${type}"
done
}
# 📋 Get just the section IDs from the API (one per line)
api_list_section_ids() {
api_list_sections | cut -d'|' -f1
}
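
These helpers replace the deprecated `Plex Media Scanner --scan/--refresh/--analyze` flags with plain HTTP calls. A quick manual equivalent against a local server, reading the token the same way `get_plex_token` does (the section id below is an example):

```bash
#!/bin/bash
PREFS="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Preferences.xml"
TOKEN=$(sudo grep -oP 'PlexOnlineToken="\K[^"]+' "$PREFS")

# List library sections (XML; keys/titles/types live on <Directory> elements)
curl -fsS "http://localhost:32400/library/sections?X-Plex-Token=${TOKEN}" | head -n 20

# Trigger a scan of section 1 (GET /library/sections/<id>/refresh)
curl -fsS "http://localhost:32400/library/sections/1/refresh?X-Plex-Token=${TOKEN}"
```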
# 🏥 Function to check Plex service status
check_plex_service() {
log_verbose "Checking Plex service status..."
@@ -166,38 +288,24 @@ list_libraries() {
return 2
fi
if ! find_scanner; then
return 3
local sections
if ! sections=$(api_list_sections) || [[ -z "$sections" ]]; then
print_status "${CROSS}" "Failed to retrieve library sections from Plex API" "${RED}"
return 5
fi
# Set library path for Linux
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
local output
if output=$("$SCANNER_PATH" --list 2>&1); then
echo ""
echo -e "${BOLD}${CYAN}Available Library Sections:${RESET}"
echo -e "${DIM}${CYAN}=========================${RESET}"
# Parse and format the output
echo "$output" | while IFS= read -r line; do
if [[ "$line" =~ ^[[:space:]]*([0-9]+):[[:space:]]*(.+)$ ]]; then
local section_id="${BASH_REMATCH[1]}"
local section_name="${BASH_REMATCH[2]}"
echo -e "${GREEN}${BOLD} ${section_id}:${RESET} ${WHITE}${section_name}${RESET}"
elif [[ -n "$line" ]]; then
echo -e "${DIM} $line${RESET}"
fi
done
while IFS='|' read -r key title type; do
[[ -n "$key" ]] || continue
printf " ${GREEN}${BOLD}%-4s${RESET} ${WHITE}%-30s${RESET} ${DIM}(%s)${RESET}\n" "${key}:" "$title" "$type"
done <<< "$sections"
echo ""
print_status "${CHECKMARK}" "Library listing completed" "${GREEN}"
return 0
else
print_status "${CROSS}" "Failed to list libraries" "${RED}"
echo -e "${DIM}${RED}Error output: $output${RESET}"
return 5
fi
}
# 🔍 Function to validate section ID
@@ -209,10 +317,9 @@ validate_section_id() {
return 1
fi
# Get list of valid section IDs
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
# Get list of valid section IDs via API
local valid_ids
if valid_ids=$("$SCANNER_PATH" --list 2>/dev/null | grep -oE '^[[:space:]]*[0-9]+:' | grep -oE '[0-9]+'); then
if valid_ids=$(api_list_section_ids) && [[ -n "$valid_ids" ]]; then
if echo "$valid_ids" | grep -q "^${section_id}$"; then
return 0
else
@@ -237,9 +344,7 @@ scan_library() {
return 4
fi
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
if "$SCANNER_PATH" --scan --section "$section_id" ${VERBOSE:+--verbose}; then
if api_scan_section "$section_id"; then
print_status "${CHECKMARK}" "Library section $section_id scan completed" "${GREEN}"
return 0
else
@@ -250,16 +355,15 @@ scan_library() {
print_status "${ROCKET}" "Scanning all libraries for new media..." "${BLUE}"
# Get all section IDs and scan each one
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
local section_ids
if section_ids=$("$SCANNER_PATH" --list 2>/dev/null | grep -oE '^[[:space:]]*[0-9]+:' | grep -oE '[0-9]+'); then
if section_ids=$(api_list_section_ids) && [[ -n "$section_ids" ]]; then
local failed_sections=()
while IFS= read -r id; do
[[ -n "$id" ]] || continue
print_status "${INFO}" "Scanning section $id..." "${YELLOW}"
if "$SCANNER_PATH" --scan --section "$id" ${VERBOSE:+--verbose}; then
if api_scan_section "$id"; then
print_status "${CHECKMARK}" "Section $id scanned successfully" "${GREEN}"
else
print_status "${CROSS}" "Failed to scan section $id" "${RED}"
@@ -286,11 +390,6 @@ refresh_library() {
local section_id="$1"
local force="${2:-false}"
local force_flag=""
if [[ "$force" == "true" ]]; then
force_flag="--force"
fi
if [[ -n "$section_id" ]]; then
print_status "${RECYCLE}" "Refreshing metadata for library section $section_id..." "${BLUE}"
@@ -298,9 +397,7 @@ refresh_library() {
return 4
fi
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
if "$SCANNER_PATH" --refresh $force_flag --section "$section_id" ${VERBOSE:+--verbose}; then
if api_refresh_section "$section_id" "$force"; then
print_status "${CHECKMARK}" "Library section $section_id metadata refreshed" "${GREEN}"
return 0
else
@@ -311,16 +408,15 @@ refresh_library() {
print_status "${RECYCLE}" "Refreshing metadata for all libraries..." "${BLUE}"
# Get all section IDs and refresh each one
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
local section_ids
if section_ids=$("$SCANNER_PATH" --list 2>/dev/null | grep -oE '^[[:space:]]*[0-9]+:' | grep -oE '[0-9]+'); then
if section_ids=$(api_list_section_ids) && [[ -n "$section_ids" ]]; then
local failed_sections=()
while IFS= read -r id; do
[[ -n "$id" ]] || continue
print_status "${INFO}" "Refreshing section $id..." "${YELLOW}"
if "$SCANNER_PATH" --refresh $force_flag --section "$id" ${VERBOSE:+--verbose}; then
if api_refresh_section "$id" "$force"; then
print_status "${CHECKMARK}" "Section $id refreshed successfully" "${GREEN}"
else
print_status "${CROSS}" "Failed to refresh section $id" "${RED}"
@@ -347,11 +443,6 @@ analyze_library() {
local section_id="$1"
local deep="${2:-false}"
local analyze_flag="--analyze"
if [[ "$deep" == "true" ]]; then
analyze_flag="--analyze-deeply"
fi
if [[ -n "$section_id" ]]; then
print_status "${SEARCH}" "Analyzing media in library section $section_id..." "${BLUE}"
@@ -359,9 +450,7 @@ analyze_library() {
return 4
fi
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
if "$SCANNER_PATH" $analyze_flag --section "$section_id" ${VERBOSE:+--verbose}; then
if api_analyze_section "$section_id"; then
print_status "${CHECKMARK}" "Library section $section_id analysis completed" "${GREEN}"
return 0
else
@@ -372,16 +461,15 @@ analyze_library() {
print_status "${SEARCH}" "Analyzing media in all libraries..." "${BLUE}"
# Get all section IDs and analyze each one
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
local section_ids
if section_ids=$("$SCANNER_PATH" --list 2>/dev/null | grep -oE '^[[:space:]]*[0-9]+:' | grep -oE '[0-9]+'); then
if section_ids=$(api_list_section_ids) && [[ -n "$section_ids" ]]; then
local failed_sections=()
while IFS= read -r id; do
[[ -n "$id" ]] || continue
print_status "${INFO}" "Analyzing section $id..." "${YELLOW}"
if "$SCANNER_PATH" $analyze_flag --section "$id" ${VERBOSE:+--verbose}; then
if api_analyze_section "$id"; then
print_status "${CHECKMARK}" "Section $id analyzed successfully" "${GREEN}"
else
print_status "${CROSS}" "Failed to analyze section $id" "${RED}"
@@ -414,9 +502,7 @@ generate_thumbnails() {
return 4
fi
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
if "$SCANNER_PATH" --generate --section "$section_id" ${VERBOSE:+--verbose}; then
if run_scanner --generate --section "$section_id" ${VERBOSE:+--verbose}; then
print_status "${CHECKMARK}" "Thumbnails generated for library section $section_id" "${GREEN}"
return 0
else
@@ -427,16 +513,15 @@ generate_thumbnails() {
print_status "${SPARKLES}" "Generating thumbnails for all libraries..." "${BLUE}"
# Get all section IDs and generate thumbnails for each one
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
local section_ids
if section_ids=$("$SCANNER_PATH" --list 2>/dev/null | grep -oE '^[[:space:]]*[0-9]+:' | grep -oE '[0-9]+'); then
if section_ids=$(api_list_section_ids) && [[ -n "$section_ids" ]]; then
local failed_sections=()
while IFS= read -r id; do
[[ -n "$id" ]] || continue
print_status "${INFO}" "Generating thumbnails for section $id..." "${YELLOW}"
if "$SCANNER_PATH" --generate --section "$id" ${VERBOSE:+--verbose}; then
if run_scanner --generate --section "$id" ${VERBOSE:+--verbose}; then
print_status "${CHECKMARK}" "Section $id thumbnails generated successfully" "${GREEN}"
else
print_status "${CROSS}" "Failed to generate thumbnails for section $id" "${RED}"
@@ -473,9 +558,7 @@ show_library_tree() {
return 4
fi
export LD_LIBRARY_PATH=/usr/lib/plexmediaserver:${LD_LIBRARY_PATH:-}
if "$SCANNER_PATH" --tree --section "$section_id"; then
if run_scanner --tree --section "$section_id"; then
print_status "${CHECKMARK}" "Tree display completed for library section $section_id" "${GREEN}"
return 0
else
@@ -531,15 +614,11 @@ interactive_mode() {
echo -e "${DIM}Select an operation to perform:${RESET}"
echo ""
# First, check if Plex is running and scanner is available
# First, check if Plex is running
if ! check_plex_service; then
return 2
fi
if ! find_scanner; then
return 3
fi
while true; do
echo -e "${BOLD}Available Operations:${RESET}"
echo -e "${GREEN}1)${RESET} List all libraries"
@@ -638,6 +717,9 @@ interactive_mode() {
;;
5)
echo ""
if ! find_scanner; then
print_status "${CROSS}" "Scanner binary required for thumbnail generation" "${RED}"
else
echo -e "${BOLD}Thumbnail Generation Options:${RESET}"
echo -e "${GREEN}1)${RESET} Generate for all libraries"
echo -e "${GREEN}2)${RESET} Generate for specific library"
@@ -655,11 +737,16 @@ interactive_mode() {
print_status "${CROSS}" "Invalid choice" "${RED}"
;;
esac
fi
;;
6)
echo ""
if ! find_scanner; then
print_status "${CROSS}" "Scanner binary required for tree display" "${RED}"
else
read -p "$(echo -e "${BOLD}Enter section ID to show tree:${RESET} ")" section_id
show_library_tree "$section_id"
fi
;;
q|Q)
print_status "${INFO}" "Goodbye!" "${CYAN}"
@@ -716,10 +803,6 @@ main() {
exit 2
fi
if ! find_scanner; then
exit 3
fi
# Handle commands
case "${1,,}" in
"list")
@@ -740,10 +823,12 @@ main() {
analyze_library "$section_id" "$deep"
;;
"generate"|"thumbnails")
if ! find_scanner; then exit 3; fi
local section_id="${2:-}"
generate_thumbnails "$section_id"
;;
"tree")
if ! find_scanner; then exit 3; fi
local section_id="$2"
if [[ -z "$section_id" ]]; then
print_status "${CROSS}" "Section ID required for tree command" "${RED}"

View File

@@ -54,12 +54,12 @@
set -e
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
CYAN=$'\033[0;36m'
NC=$'\033[0m' # No Color
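# Note: with ANSI-C quoting ($'...') the shell expands \033 to a literal ESC byte at
# assignment time, so the colours render with plain printf/echo and no longer depend on
# echo -e. A quick sanity check, assuming a colour-capable terminal:
#   printf '%sPASS%s\n' "$GREEN" "$NC"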
# Test configuration
TEST_DIR="/tmp/plex-backup-test-$(date +%s)"

0
plex/tui/__init__.py Normal file
View File

685
plex/tui/app.py Normal file
View File

@@ -0,0 +1,685 @@
#!/usr/bin/env python3
"""Plex Management TUI — a Textual-based terminal interface for Plex operations."""
from __future__ import annotations
import asyncio
import sys
from pathlib import Path
from typing import Callable, Coroutine
from textual import on, work
from textual.app import App, ComposeResult
from textual.containers import Horizontal, Vertical, VerticalScroll
from textual.screen import ModalScreen
from textual.widgets import (
Button,
Footer,
Header,
Input,
Label,
ListItem,
ListView,
Log,
Static,
)
from backend import CommandResult, run_command # noqa: local import
from backend import cache_sudo, is_sudo_failure # noqa: local import
# ── Path to stylesheets ─────────────────────────────────────────────────
CSS_PATH = Path(__file__).parent / "plex_tui.tcss"
class NavListView(ListView):
"""ListView that only changes selection on click/keyboard, not mouse hover."""
def on_mouse_move(self, event) -> None:
"""Swallow mouse-move so the cursor doesn't follow the pointer."""
event.stop()
# ── Navigation items ────────────────────────────────────────────────────
NAV_SECTIONS: list[tuple[str, str]] = [
("service", "⏻ Service Control"),
("libraries", "📚 Library Scanner"),
("backup", "💾 Backup & Validate"),
("monitor", "📊 Monitoring"),
("database", "🗄️ Database Management"),
("recovery", "🔧 Recovery"),
("queries", "🔍 Queries & Stats"),
("testing", "🧪 Testing"),
]
# ── Confirmation dialog ─────────────────────────────────────────────────
class ConfirmDialog(ModalScreen[bool]):
"""A modal yes/no confirmation dialog."""
def __init__(self, title: str, body: str) -> None:
super().__init__()
self._title = title
self._body = body
def compose(self) -> ComposeResult:
with Vertical(id="dialog-container"):
yield Label(self._title, id="dialog-title")
yield Static(self._body, id="dialog-body")
with Horizontal(classes="dialog-buttons"):
yield Button("Cancel", variant="default", id="dialog-cancel")
yield Button("Confirm", variant="error", id="dialog-confirm")
@on(Button.Pressed, "#dialog-confirm")
def _confirm(self) -> None:
self.dismiss(True)
@on(Button.Pressed, "#dialog-cancel")
def _cancel(self) -> None:
self.dismiss(False)
# ── Input dialog ─────────────────────────────────────────────────────────
class InputDialog(ModalScreen[str | None]):
"""Modal dialog that asks for a text value."""
def __init__(self, title: str, body: str, placeholder: str = "") -> None:
super().__init__()
self._title = title
self._body = body
self._placeholder = placeholder
def compose(self) -> ComposeResult:
with Vertical(id="input-dialog-container"):
yield Label(self._title, id="input-dialog-title")
yield Static(self._body, id="input-dialog-body")
yield Input(placeholder=self._placeholder, id="input-value")
with Horizontal(classes="dialog-buttons"):
yield Button("Cancel", variant="default", id="input-cancel")
yield Button("OK", variant="primary", id="input-ok")
@on(Button.Pressed, "#input-ok")
def _ok(self) -> None:
value = self.query_one("#input-value", Input).value.strip()
self.dismiss(value if value else None)
@on(Button.Pressed, "#input-cancel")
def _cancel(self) -> None:
self.dismiss(None)
@on(Input.Submitted)
def _submit(self) -> None:
self._ok()
# ── Password dialog ──────────────────────────────────────────────────────
class PasswordDialog(ModalScreen[str | None]):
"""Modal dialog that asks for a password (masked input)."""
def __init__(self, title: str, body: str) -> None:
super().__init__()
self._title = title
self._body = body
def compose(self) -> ComposeResult:
with Vertical(id="password-dialog-container"):
yield Label(self._title, id="password-dialog-title")
yield Static(self._body, id="password-dialog-body")
yield Input(placeholder="Password", password=True, id="password-value")
with Horizontal(classes="dialog-buttons"):
yield Button("Cancel", variant="default", id="password-cancel")
yield Button("Authenticate", variant="primary", id="password-ok")
@on(Button.Pressed, "#password-ok")
def _ok(self) -> None:
value = self.query_one("#password-value", Input).value
self.dismiss(value if value else None)
@on(Button.Pressed, "#password-cancel")
def _cancel(self) -> None:
self.dismiss(None)
@on(Input.Submitted)
def _submit(self) -> None:
self._ok()
# ── Section panels ───────────────────────────────────────────────────────
def _section_header(text: str) -> Static:
return Static(text, classes="section-header")
def _btn(label: str, btn_id: str, classes: str = "action-btn") -> Button:
return Button(label, id=btn_id, classes=classes)
class ServicePanel(Vertical):
def compose(self) -> ComposeResult:
yield _section_header("Plex Media Server — Service Control")
with Horizontal(classes="button-row"):
yield _btn("▶ Start", "svc-start", "action-btn-success")
yield _btn("⏹ Stop", "svc-stop", "action-btn-danger")
yield _btn("🔄 Restart", "svc-restart", "action-btn-warning")
with Horizontal(classes="button-row"):
yield _btn(" Status", "svc-status")
class LibraryPanel(Vertical):
def compose(self) -> ComposeResult:
yield _section_header("Library Scanner")
with Horizontal(classes="button-row"):
yield _btn("📋 List Libraries", "lib-list")
yield _btn("🔍 Scan All", "lib-scan-all")
yield _btn("🔍 Scan Section…", "lib-scan-id")
with Horizontal(classes="button-row"):
yield _btn("🔄 Refresh All", "lib-refresh-all")
yield _btn("🔄 Refresh Section…", "lib-refresh-id")
yield _btn("⚡ Force Refresh All", "lib-force-refresh")
with Horizontal(classes="button-row"):
yield _btn("📊 Analyze All", "lib-analyze-all")
yield _btn("📊 Analyze Section…", "lib-analyze-id")
class BackupPanel(Vertical):
def compose(self) -> ComposeResult:
yield _section_header("Backup & Validation")
with Horizontal(classes="button-row"):
yield _btn("💾 Run Backup", "bak-run")
yield _btn("💾 Backup + Auto-Repair", "bak-run-repair")
yield _btn("🔍 Integrity Check", "bak-integrity")
with Horizontal(classes="button-row"):
yield _btn("📋 List Backups", "bak-list")
yield _btn("✅ Validate Latest", "bak-validate-latest")
yield _btn("📝 Validation Report", "bak-validate-report")
with Horizontal(classes="button-row"):
yield _btn("📦 Built-in Status", "bak-builtin")
yield _btn("📦 Built-in Detailed", "bak-builtin-detail")
class MonitorPanel(Vertical):
def compose(self) -> ComposeResult:
yield _section_header("Backup Monitoring Dashboard")
with Horizontal(classes="button-row"):
yield _btn("📊 Show Dashboard", "mon-dashboard")
class DatabasePanel(Vertical):
def compose(self) -> ComposeResult:
yield _section_header("Database Management")
with Horizontal(classes="button-row"):
yield _btn("🔍 Integrity Check", "db-check")
yield _btn("🔧 Gentle Repair", "db-repair-gentle", "action-btn-warning")
yield _btn("⚠️ Force Repair", "db-repair-force", "action-btn-danger")
with Horizontal(classes="button-row"):
yield _btn("🧹 Cleanup (dry-run)", "db-cleanup-dry")
yield _btn("🧹 Cleanup (apply)", "db-cleanup-apply", "action-btn-warning")
yield _btn("📥 Install/Update DBRepair", "db-install-dbrepair")
class RecoveryPanel(Vertical):
def compose(self) -> ComposeResult:
yield _section_header("Recovery Operations")
with Horizontal(classes="button-row"):
yield _btn("🔎 Verify Backup Only", "rec-verify")
yield _btn("🧪 Nuclear Dry-Run", "rec-nuclear-dry")
with Horizontal(classes="button-row"):
yield _btn("☢️ Nuclear Recovery", "rec-nuclear-auto", "action-btn-danger")
yield Static(
" ⚠ Nuclear recovery is a last resort — it replaces your entire database from backup.",
classes="status-warning",
)
yield _section_header("Post-Recovery Validation")
with Horizontal(classes="button-row"):
yield _btn("⚡ Quick Validate", "rec-validate-quick")
yield _btn("🔍 Detailed Validate", "rec-validate-detailed")
yield _btn("📈 Performance Validate", "rec-validate-perf")
class QueryPanel(Vertical):
def compose(self) -> ComposeResult:
yield _section_header("Queries & Statistics")
with Horizontal(classes="button-row"):
yield _btn("🆕 Recent Additions (7d)", "qry-recent-7")
yield _btn("🆕 Recent Additions (30d)", "qry-recent-30")
yield _btn("🆕 Recent Additions…", "qry-recent-custom")
with Horizontal(classes="button-row"):
yield _btn("📊 Library Stats", "qry-stats")
yield _btn("🔢 Media Counts", "qry-counts")
yield _btn("📋 List Libraries", "qry-libraries")
with Horizontal(classes="button-row"):
yield _btn("💬 Custom SQL Query…", "qry-custom")
class TestingPanel(Vertical):
def compose(self) -> ComposeResult:
yield _section_header("Testing & Diagnostics")
with Horizontal(classes="button-row"):
yield _btn("⚡ Quick Smoke Tests", "test-quick")
yield _btn("🧪 Unit Tests", "test-unit")
yield _btn("🔗 Integration Tests", "test-integration")
with Horizontal(classes="button-row"):
yield _btn("🧹 Cleanup Test Artifacts", "test-cleanup")
# Map section key -> panel class
PANELS: dict[str, type] = {
"service": ServicePanel,
"libraries": LibraryPanel,
"backup": BackupPanel,
"monitor": MonitorPanel,
"database": DatabasePanel,
"recovery": RecoveryPanel,
"queries": QueryPanel,
"testing": TestingPanel,
}
# ── Main application ────────────────────────────────────────────────────
class PlexTUI(App):
"""Plex Management TUI."""
TITLE = "Plex Management Console"
CSS_PATH = CSS_PATH
BINDINGS = [
("q", "quit", "Quit"),
("d", "toggle_dark", "Toggle Dark"),
("c", "clear_log", "Clear Log"),
("s", "authenticate_sudo", "Sudo Auth"),
("1", "nav('service')", "Service"),
("2", "nav('libraries')", "Libraries"),
("3", "nav('backup')", "Backup"),
("4", "nav('monitor')", "Monitor"),
("5", "nav('database')", "Database"),
("6", "nav('recovery')", "Recovery"),
("7", "nav('queries')", "Queries"),
("8", "nav('testing')", "Testing"),
]
def __init__(self) -> None:
super().__init__()
self._current_section = "service"
# ── Composition ───────────────────────────────────────────────────
def compose(self) -> ComposeResult:
yield Header()
with Horizontal():
with Vertical(id="sidebar"):
yield Static("Plex Manager", id="sidebar-title")
yield NavListView(
*[
ListItem(Label(label, classes="nav-label"), id=f"nav-{key}")
for key, label in NAV_SECTIONS
],
id="nav-list",
)
with Vertical(id="main-content"):
# A scrollable area where the active section panel lives
yield VerticalScroll(ServicePanel(), id="panel-area")
yield Log(id="output-log", highlight=True, auto_scroll=True)
yield Footer()
def on_mount(self) -> None:
self._select_nav(0)
# Disable any-event mouse tracking (mode 1003) to prevent hover effects.
# Basic click tracking (mode 1000) remains active.
self.set_timer(0.1, self._disable_mouse_move_tracking)
def _disable_mouse_move_tracking(self) -> None:
sys.stdout.write("\x1b[?1003l")
sys.stdout.flush()
# ── Navigation ────────────────────────────────────────────────────
def _select_nav(self, index: int) -> None:
nav_list = self.query_one("#nav-list", ListView)
nav_list.index = index
@on(ListView.Selected, "#nav-list")
def _on_nav_selected(self, event: ListView.Selected) -> None:
item_id = event.item.id or ""
section_key = item_id.removeprefix("nav-")
if section_key in PANELS:
self._switch_section(section_key)
def _switch_section(self, key: str) -> None:
if key == self._current_section:
return
self._current_section = key
panel_area = self.query_one("#panel-area", VerticalScroll)
panel_area.remove_children()
panel_area.mount(PANELS[key]())
def action_nav(self, section: str) -> None:
keys = [k for k, _ in NAV_SECTIONS]
if section in keys:
self._select_nav(keys.index(section))
self._switch_section(section)
def action_clear_log(self) -> None:
self.query_one("#output-log", Log).clear()
# ── Sudo authentication ─────────────────────────────────────
def action_authenticate_sudo(self) -> None:
self._prompt_sudo()
def _prompt_sudo(self) -> None:
async def _cb(password: str | None) -> None:
if password is not None:
success = await cache_sudo(password)
if success:
self._log("🔓 Sudo credentials cached successfully.")
else:
self._log("✗ Sudo authentication failed. Wrong password?")
self.push_screen(
PasswordDialog(
"🔒 Sudo Authentication",
"Enter your password to cache sudo credentials:",
),
_cb, # type: ignore[arg-type]
)
# ── Logging helper ────────────────────────────────────────────────
def _log(self, text: str) -> None:
log_widget = self.query_one("#output-log", Log)
log_widget.write_line(text)
def _log_result(self, label: str, result: CommandResult) -> None:
status = "" if result.ok else ""
self._log(f"[{status}] {label}")
if result.output:
for line in result.output.splitlines():
self._log(f" {line}")
self._log("")
# ── Async operation runner ────────────────────────────────────────
@work(thread=False)
async def _run_op(
self,
label: str,
coro: Coroutine[None, None, CommandResult],
) -> None:
self._log(f"{label}...")
result = await coro
if is_sudo_failure(result):
self._log(f"[✗] {label}")
self._log(" 🔒 This command requires sudo. Press 's' to authenticate, then try again.")
self._log("")
else:
self._log_result(label, result)
def _ask_section_then_run(
self,
title: str,
op: Callable[[str], Coroutine[None, None, CommandResult]],
) -> None:
"""Prompt for a section ID, then run an async operation with it."""
async def _do(dialog_result: str | None) -> None:
if dialog_result is not None:
self._run_op(f"{title} (section {dialog_result})", op(dialog_result))
self.app.push_screen(
InputDialog(title, "Enter library section ID:", placeholder="e.g. 1"),
_do, # type: ignore[arg-type]
)
# ── Button dispatch ───────────────────────────────────────────────
@on(Button.Pressed)
def _on_button(self, event: Button.Pressed) -> None:
from backend import ( # local import to keep top-level light
backup_builtin_detailed,
backup_builtin_status,
backup_integrity_check,
backup_list,
backup_run,
backup_validate,
backup_validate_report,
custom_query,
db_check,
db_cleanup,
db_install_dbrepair,
db_repair_force,
db_repair_gentle,
library_analyze,
library_list,
library_refresh,
library_scan,
library_stats,
list_libraries_query,
media_counts,
monitor_dashboard,
nuclear_recovery_auto,
nuclear_recovery_dry_run,
nuclear_recovery_verify,
plex_restart,
plex_start,
plex_status,
plex_stop,
recent_additions,
run_tests_cleanup,
run_tests_integration,
run_tests_quick,
run_tests_unit,
validate_recovery,
)
bid = event.button.id or ""
# ── Service ────────────────────────────────
if bid == "svc-start":
self._run_op("Start Plex", plex_start())
elif bid == "svc-stop":
self._confirm_then_run(
"Stop Plex?",
"This will stop the Plex Media Server service.",
"Stop Plex",
plex_stop(),
)
elif bid == "svc-restart":
self._confirm_then_run(
"Restart Plex?",
"This will restart the Plex Media Server service.",
"Restart Plex",
plex_restart(),
)
elif bid == "svc-status":
self._run_op("Plex Status", plex_status())
# ── Libraries ──────────────────────────────
elif bid == "lib-list":
self._run_op("List Libraries", library_list())
elif bid == "lib-scan-all":
self._run_op("Scan All Libraries", library_scan())
elif bid == "lib-scan-id":
self._ask_section_then_run("Scan Library", library_scan)
elif bid == "lib-refresh-all":
self._run_op("Refresh All Libraries", library_refresh())
elif bid == "lib-refresh-id":
self._ask_section_then_run("Refresh Library", library_refresh)
elif bid == "lib-force-refresh":
self._run_op("Force Refresh All", library_refresh(force=True))
elif bid == "lib-analyze-all":
self._run_op("Analyze All Libraries", library_analyze())
elif bid == "lib-analyze-id":
self._ask_section_then_run("Analyze Library", library_analyze)
# ── Backup ─────────────────────────────────
elif bid == "bak-run":
self._run_op("Run Backup", backup_run())
elif bid == "bak-run-repair":
self._run_op("Backup + Auto-Repair", backup_run(auto_repair=True))
elif bid == "bak-integrity":
self._run_op("Integrity Check", backup_integrity_check())
elif bid == "bak-list":
self._run_op("List Backups", backup_list())
elif bid == "bak-validate-latest":
self._run_op("Validate Latest Backup", backup_validate(latest_only=True))
elif bid == "bak-validate-report":
self._run_op("Full Validation Report", backup_validate_report())
elif bid == "bak-builtin":
self._run_op("Built-in Backup Status", backup_builtin_status())
elif bid == "bak-builtin-detail":
self._run_op("Built-in Backup (Detailed)", backup_builtin_detailed())
# ── Monitor ────────────────────────────────
elif bid == "mon-dashboard":
self._run_op("Monitoring Dashboard", monitor_dashboard())
# ── Database ───────────────────────────────
elif bid == "db-check":
self._run_op("Database Integrity Check", db_check())
elif bid == "db-repair-gentle":
self._confirm_then_run(
"Gentle DB Repair?",
"This will attempt a gentle repair of the Plex database.",
"Gentle DB Repair",
db_repair_gentle(),
)
elif bid == "db-repair-force":
self._confirm_then_run(
"Force DB Repair?",
"This will aggressively repair the Plex database. "
"The Plex service will be stopped during repair.",
"Force DB Repair",
db_repair_force(),
)
elif bid == "db-cleanup-dry":
self._run_op("DB Cleanup (dry-run)", db_cleanup(dry_run=True))
elif bid == "db-cleanup-apply":
self._confirm_then_run(
"Apply DB Cleanup?",
"This will permanently remove temporary and recovery files "
"from the Plex database directory.",
"DB Cleanup",
db_cleanup(dry_run=False),
)
elif bid == "db-install-dbrepair":
self._run_op("Install/Update DBRepair", db_install_dbrepair())
# ── Recovery ───────────────────────────────
elif bid == "rec-verify":
self._run_op("Verify Backup Integrity", nuclear_recovery_verify())
elif bid == "rec-nuclear-dry":
self._run_op("Nuclear Recovery (dry-run)", nuclear_recovery_dry_run())
elif bid == "rec-nuclear-auto":
self._confirm_then_run(
"☢️ NUCLEAR RECOVERY",
"This will REPLACE YOUR ENTIRE DATABASE from the best available backup.\n\n"
"This is a LAST RESORT operation. Plex will be stopped during recovery.\n"
"Are you absolutely sure?",
"Nuclear Recovery",
nuclear_recovery_auto(),
)
elif bid == "rec-validate-quick":
self._run_op("Quick Recovery Validation", validate_recovery("--quick"))
elif bid == "rec-validate-detailed":
self._run_op(
"Detailed Recovery Validation", validate_recovery("--detailed")
)
elif bid == "rec-validate-perf":
self._run_op(
"Performance Recovery Validation", validate_recovery("--performance")
)
# ── Queries ────────────────────────────────
elif bid == "qry-recent-7":
self._run_op("Recent Additions (7 days)", recent_additions(7))
elif bid == "qry-recent-30":
self._run_op("Recent Additions (30 days)", recent_additions(30))
elif bid == "qry-recent-custom":
self._ask_days_then_run()
elif bid == "qry-stats":
self._run_op("Library Stats", library_stats())
elif bid == "qry-counts":
self._run_op("Media Counts", media_counts())
elif bid == "qry-libraries":
self._run_op("List Libraries", list_libraries_query())
elif bid == "qry-custom":
self._ask_sql_then_run()
# ── Testing ────────────────────────────────
elif bid == "test-quick":
self._run_op("Quick Smoke Tests", run_tests_quick())
elif bid == "test-unit":
self._run_op("Unit Tests", run_tests_unit())
elif bid == "test-integration":
self._run_op("Integration Tests", run_tests_integration())
elif bid == "test-cleanup":
self._run_op("Cleanup Test Artifacts", run_tests_cleanup())
# ── Confirmation helper ───────────────────────────────────────────
def _confirm_then_run(
self,
title: str,
body: str,
label: str,
coro: Coroutine[None, None, CommandResult],
) -> None:
async def _callback(confirmed: bool) -> None:
if confirmed:
self._run_op(label, coro)
self.push_screen(ConfirmDialog(title, body), _callback) # type: ignore[arg-type]
# ── Input prompt helpers ──────────────────────────────────────────
def _ask_days_then_run(self) -> None:
from backend import recent_additions
async def _cb(val: str | None) -> None:
if val is not None and val.isdigit():
self._run_op(
f"Recent Additions ({val} days)", recent_additions(int(val))
)
self.push_screen(
InputDialog(
"Recent Additions",
"Enter number of days:",
placeholder="e.g. 14",
),
_cb, # type: ignore[arg-type]
)
def _ask_sql_then_run(self) -> None:
from backend import custom_query
async def _cb(val: str | None) -> None:
if val is not None:
self._run_op(f"Custom Query", custom_query(val))
self.push_screen(
InputDialog(
"Custom SQL Query",
"Enter a SQL query to run against the Plex database:",
placeholder="SELECT count(*) FROM metadata_items",
),
_cb, # type: ignore[arg-type]
)
# ── Entry point ──────────────────────────────────────────────────────────
def main() -> None:
app = PlexTUI()
app.run()
if __name__ == "__main__":
main()

345
plex/tui/backend.py Normal file
View File

@@ -0,0 +1,345 @@
"""Plex Management TUI — backend helpers for running shell scripts."""
from __future__ import annotations
import asyncio
import os
import shlex
from dataclasses import dataclass, field
from pathlib import Path
SCRIPT_DIR = Path(__file__).resolve().parent.parent # /home/…/shell/plex
@dataclass
class CommandResult:
returncode: int
stdout: str
stderr: str
command: str
@property
def ok(self) -> bool:
return self.returncode == 0
@property
def output(self) -> str:
text = self.stdout
if self.stderr:
text += "\n" + self.stderr
return text.strip()
def _script(name: str) -> str:
"""Return absolute path to a script in the plex directory."""
return str(SCRIPT_DIR / name)
# ── Ansi stripping ──────────────────────────────────────────────────────
import re
_ANSI_RE = re.compile(r"\x1b\[[0-9;]*[A-Za-z]")
def strip_ansi(text: str) -> str:
return _ANSI_RE.sub("", text)
# ── Async command runner ────────────────────────────────────────────────
async def run_command(
cmd: str | list[str],
*,
sudo: bool = False,
timeout: int = 300,
) -> CommandResult:
"""Run a shell command asynchronously and return the result."""
if isinstance(cmd, list):
shell_cmd = " ".join(shlex.quote(c) for c in cmd)
else:
shell_cmd = cmd
if sudo:
shell_cmd = f"sudo {shell_cmd}"
env = os.environ.copy()
env["TERM"] = "dumb" # suppress colour in child scripts
try:
proc = await asyncio.create_subprocess_shell(
shell_cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.DEVNULL,
env=env,
)
stdout_bytes, stderr_bytes = await asyncio.wait_for(
proc.communicate(), timeout=timeout
)
return CommandResult(
returncode=proc.returncode or 0,
stdout=strip_ansi(stdout_bytes.decode(errors="replace")),
stderr=strip_ansi(stderr_bytes.decode(errors="replace")),
command=shell_cmd,
)
except asyncio.TimeoutError:
return CommandResult(
returncode=-1,
stdout="",
stderr=f"Command timed out after {timeout}s",
command=shell_cmd,
)
except Exception as exc:
return CommandResult(
returncode=-1,
stdout="",
stderr=str(exc),
command=shell_cmd,
)
# ── Sudo helpers ────────────────────────────────────────────────────────
_SUDO_FAIL_PATTERNS = [
"sudo: a password is required",
"sudo: a terminal is required",
"sudo: no tty present",
]
def is_sudo_failure(result: CommandResult) -> bool:
"""Return True if the command failed because of missing sudo credentials."""
if result.ok:
return False
text = (result.stdout + " " + result.stderr).lower()
return any(p in text for p in _SUDO_FAIL_PATTERNS)
async def check_sudo_cached() -> bool:
"""Check whether sudo credentials are currently cached (no password needed)."""
try:
proc = await asyncio.create_subprocess_exec(
"sudo", "-n", "true",
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL,
)
await proc.wait()
return proc.returncode == 0
except Exception:
return False
async def cache_sudo(password: str) -> bool:
"""Cache sudo credentials by validating the given password."""
try:
proc = await asyncio.create_subprocess_exec(
"sudo", "-S", "-v",
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.DEVNULL,
)
await proc.communicate(input=(password + "\n").encode())
return proc.returncode == 0
except Exception:
return False
# ── High-level operations (each returns a CommandResult) ────────────────
# Service management ─────────────────────────────────────────────────────
async def plex_start() -> CommandResult:
return await run_command(f"bash {_script('plex.sh')} start")
async def plex_stop() -> CommandResult:
return await run_command(f"bash {_script('plex.sh')} stop")
async def plex_restart() -> CommandResult:
return await run_command(f"bash {_script('plex.sh')} restart")
async def plex_status() -> CommandResult:
return await run_command(f"bash {_script('plex.sh')} status")
# Library scanning ───────────────────────────────────────────────────────
async def library_list() -> CommandResult:
return await run_command(f"bash {_script('scan-plex-libraries.sh')} list")
async def library_scan(section_id: str = "") -> CommandResult:
cmd = f"bash {_script('scan-plex-libraries.sh')} scan"
if section_id:
cmd += f" {shlex.quote(section_id)}"
return await run_command(cmd)
async def library_refresh(section_id: str = "", force: bool = False) -> CommandResult:
cmd = f"bash {_script('scan-plex-libraries.sh')} refresh"
if section_id:
cmd += f" {shlex.quote(section_id)}"
else:
cmd += ' ""'
if force:
cmd += " true"
return await run_command(cmd)
async def library_analyze(section_id: str = "") -> CommandResult:
cmd = f"bash {_script('scan-plex-libraries.sh')} analyze"
if section_id:
cmd += f" {shlex.quote(section_id)}"
return await run_command(cmd)
# Backup ─────────────────────────────────────────────────────────────────
async def backup_run(auto_repair: bool = False) -> CommandResult:
cmd = f"bash {_script('backup-plex.sh')} --non-interactive"
if auto_repair:
cmd += " --auto-repair"
return await run_command(cmd, timeout=600)
async def backup_integrity_check() -> CommandResult:
return await run_command(
f"bash {_script('backup-plex.sh')} --check-integrity --non-interactive"
)
async def backup_validate(latest_only: bool = True) -> CommandResult:
cmd = f"bash {_script('validate-plex-backups.sh')}"
if latest_only:
cmd += " --latest"
return await run_command(cmd)
async def backup_validate_report() -> CommandResult:
return await run_command(f"bash {_script('validate-plex-backups.sh')} --report")
async def backup_list() -> CommandResult:
return await run_command(f"bash {_script('plex.sh')} backups")
async def backup_builtin_status() -> CommandResult:
return await run_command(f"bash {_script('check-plex-builtin-backups.sh')}")
async def backup_builtin_detailed() -> CommandResult:
return await run_command(f"bash {_script('check-plex-builtin-backups.sh')} --detailed")
# Monitoring ──────────────────────────────────────────────────────────────
async def monitor_dashboard() -> CommandResult:
return await run_command(f"bash {_script('monitor-plex-backup.sh')}")
# Database management ────────────────────────────────────────────────────
async def db_check() -> CommandResult:
return await run_command(f"bash {_script('plex-db-manager.sh')} check")
async def db_repair_gentle() -> CommandResult:
return await run_command(f"bash {_script('plex-db-manager.sh')} repair --gentle")
async def db_repair_force() -> CommandResult:
return await run_command(f"bash {_script('plex-db-manager.sh')} repair --force")
async def db_cleanup(dry_run: bool = True) -> CommandResult:
cmd = f"bash {_script('cleanup-plex-databases.sh')}"
if dry_run:
cmd += " --dry-run"
cmd += " --verbose"
return await run_command(cmd)
async def db_install_dbrepair() -> CommandResult:
return await run_command(f"bash {_script('plex.sh')} install-dbrepair")
# Recovery ────────────────────────────────────────────────────────────────
async def nuclear_recovery_dry_run() -> CommandResult:
return await run_command(
f"bash {_script('nuclear-plex-recovery.sh')} --dry-run"
)
async def nuclear_recovery_auto() -> CommandResult:
return await run_command(
f"bash {_script('nuclear-plex-recovery.sh')} --auto", timeout=600
)
async def nuclear_recovery_verify() -> CommandResult:
return await run_command(
f"bash {_script('nuclear-plex-recovery.sh')} --verify-only"
)
async def validate_recovery(mode: str = "--quick") -> CommandResult:
return await run_command(
f"bash {_script('validate-plex-recovery.sh')} {mode}"
)
# Queries ─────────────────────────────────────────────────────────────────
async def recent_additions(days: int = 7) -> CommandResult:
return await run_command(
f"bash {_script('plex-recent-additions.sh')} recent {days}"
)
async def library_stats() -> CommandResult:
return await run_command(f"bash {_script('plex-recent-additions.sh')} stats")
async def media_counts() -> CommandResult:
return await run_command(f"bash {_script('plex-recent-additions.sh')} count")
async def list_libraries_query() -> CommandResult:
return await run_command(f"bash {_script('plex-recent-additions.sh')} libraries")
async def custom_query(sql: str) -> CommandResult:
return await run_command(
f"bash {_script('plex-recent-additions.sh')} custom {shlex.quote(sql)}"
)
# Testing ─────────────────────────────────────────────────────────────────
async def run_tests_quick() -> CommandResult:
return await run_command(
f"bash {_script('test-plex-backup.sh')} --quick", timeout=120
)
async def run_tests_unit() -> CommandResult:
return await run_command(
f"bash {_script('test-plex-backup.sh')} --unit", timeout=300
)
async def run_tests_integration() -> CommandResult:
return await run_command(
f"bash {_script('integration-test-plex.sh')} --quick", timeout=300
)
async def run_tests_cleanup() -> CommandResult:
return await run_command(
f"bash {_script('test-plex-backup.sh')} --cleanup"
)

279
plex/tui/plex_tui.tcss Normal file
View File

@@ -0,0 +1,279 @@
/* Plex TUI Theme - Orange/Dark inspired by Plex branding */
Screen {
background: $surface;
}
Header {
background: #282828;
color: #e5a00d;
}
Footer {
background: #282828;
}
/* Sidebar navigation */
#sidebar {
width: 32;
background: #1a1a2e;
border-right: solid #e5a00d;
padding: 1 0;
}
#sidebar-title {
text-align: center;
text-style: bold;
color: #e5a00d;
padding: 0 1;
margin-bottom: 1;
}
#nav-list {
background: transparent;
}
#nav-list > ListItem {
padding: 0 1;
height: 3;
background: transparent;
}
#nav-list > ListItem:hover {
background: transparent;
}
#nav-list > ListItem.-active {
background: #e5a00d 20%;
}
.nav-label {
padding: 1 2;
width: 100%;
}
/* Main content */
#main-content {
padding: 1 2;
}
/* Section headers */
.section-header {
text-style: bold;
color: #e5a00d;
padding: 0 0 1 0;
text-align: center;
}
/* Status panel */
#status-panel {
height: auto;
max-height: 12;
border: solid #444;
margin-bottom: 1;
padding: 1;
}
/* Action buttons */
.action-btn {
margin: 0 1 1 0;
min-width: 24;
}
.action-btn:hover {
opacity: 1.0;
}
.action-btn-danger {
margin: 0 1 1 0;
min-width: 24;
background: $error;
}
.action-btn-danger:hover {
background: $error;
}
.action-btn-warning {
margin: 0 1 1 0;
min-width: 24;
background: $warning;
}
.action-btn-warning:hover {
background: $warning;
}
.action-btn-success {
margin: 0 1 1 0;
min-width: 24;
background: $success;
}
.action-btn-success:hover {
background: $success;
}
/* Suppress default button hover tint */
Button:hover {
opacity: 1.0;
}
/* Button rows */
.button-row {
layout: horizontal;
height: auto;
padding: 0 0 1 0;
}
/* Output log */
#output-log {
border: solid #444;
height: 1fr;
margin-top: 1;
}
/* DataTable */
DataTable {
height: auto;
max-height: 20;
margin-bottom: 1;
}
/* Confirmation dialog */
ConfirmDialog {
align: center middle;
}
ConfirmDialog > #dialog-container {
width: 60;
height: auto;
border: thick $error;
background: $surface;
padding: 1 2;
}
ConfirmDialog #dialog-title {
text-style: bold;
color: $error;
text-align: center;
margin-bottom: 1;
}
ConfirmDialog #dialog-body {
margin-bottom: 1;
}
ConfirmDialog .dialog-buttons {
layout: horizontal;
align-horizontal: center;
height: auto;
}
ConfirmDialog .dialog-buttons Button {
margin: 0 2;
}
/* Input dialog */
InputDialog {
align: center middle;
}
InputDialog > #input-dialog-container {
width: 60;
height: auto;
border: thick #e5a00d;
background: $surface;
padding: 1 2;
}
InputDialog #input-dialog-title {
text-style: bold;
color: #e5a00d;
text-align: center;
margin-bottom: 1;
}
InputDialog #input-dialog-body {
margin-bottom: 1;
}
InputDialog .dialog-buttons {
layout: horizontal;
align-horizontal: center;
height: auto;
}
InputDialog .dialog-buttons Button {
margin: 0 2;
}
/* Password dialog */
PasswordDialog {
align: center middle;
}
PasswordDialog > #password-dialog-container {
width: 60;
height: auto;
border: thick #e5a00d;
background: $surface;
padding: 1 2;
}
PasswordDialog #password-dialog-title {
text-style: bold;
color: #e5a00d;
text-align: center;
margin-bottom: 1;
}
PasswordDialog #password-dialog-body {
margin-bottom: 1;
}
PasswordDialog .dialog-buttons {
layout: horizontal;
align-horizontal: center;
height: auto;
}
PasswordDialog .dialog-buttons Button {
margin: 0 2;
}
/* Tabs */
TabbedContent {
height: 1fr;
}
TabPane {
padding: 1;
}
/* Info cards */
.info-card {
background: #1a1a2e;
border: solid #444;
padding: 1;
margin-bottom: 1;
height: auto;
}
.info-card-title {
text-style: bold;
color: #e5a00d;
}
/* Status indicators */
.status-ok {
color: $success;
}
.status-error {
color: $error;
}
.status-warning {
color: $warning;
}

View File

@@ -50,11 +50,11 @@
set -e
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
NC=$'\033[0m'
# Configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"

View File

@@ -53,11 +53,11 @@
# Comprehensive check to ensure Plex is fully recovered and functional
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
YELLOW=$'\033[1;33m'
BLUE=$'\033[0;34m'
NC=$'\033[0m' # No Color
PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"

447
restore-karakeep.sh Executable file
View File

@@ -0,0 +1,447 @@
#!/bin/bash
# restore-karakeep.sh
# Restore Karakeep Docker volumes from a backup created by backup-karakeep.sh
#
# Usage:
# ./restore-karakeep.sh <backup_directory>
# ./restore-karakeep.sh --latest
#
# EXAMPLES:
# ./restore-karakeep.sh /home/acedanger/backups/karakeep/20260325_143000
# ./restore-karakeep.sh --latest # auto-selects most recent local backup
set -e
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# Configuration
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
COMPOSE_DIR="/home/acedanger/docker/karakeep"
LOCAL_BACKUP_BASE="/home/acedanger/backups/karakeep"
# COMPOSE_DIR may be overridden with --compose-dir
NAS_BACKUP_BASE="/mnt/share/media/backups/karakeep"
LOG_ROOT="${SCRIPT_DIR}/logs"
NAS_LOG_DIR="/mnt/share/media/backups/logs"
RESTORE_TIMESTAMP=$(date +%Y%m%d_%H%M%S)
# Create log directory and set log file paths
mkdir -p "$LOG_ROOT"
LOG_FILE="${LOG_ROOT}/karakeep-restore-${RESTORE_TIMESTAMP}.log"
MARKDOWN_LOG="${LOG_ROOT}/karakeep-restore-${RESTORE_TIMESTAMP}.md"
# Write markdown log header
{
echo "# Karakeep Restore Log"
echo "**Started**: $(date '+%Y-%m-%d %H:%M:%S')"
echo "**Host**: $(hostname)"
echo ""
} > "$MARKDOWN_LOG"
# Volume definitions: volume_name -> mount_path
declare -A KARAKEEP_VOLUMES=(
["hoarder_data"]="/data"
["hoarder_meilisearch"]="/meili_data"
)
# Logging functions
log_message() {
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${CYAN}[${timestamp}]${NC} $1"
echo "[${timestamp}] $1" >> "$LOG_FILE" 2>/dev/null || true
}
log_info() {
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${BLUE}[${timestamp}] INFO:${NC} $1"
echo "[${timestamp}] INFO: $1" >> "$LOG_FILE" 2>/dev/null || true
}
log_success() {
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} $1"
echo "[${timestamp}] SUCCESS: $1" >> "$LOG_FILE" 2>/dev/null || true
}
log_warning() {
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${YELLOW}[${timestamp}] WARNING:${NC} $1"
echo "[${timestamp}] WARNING: $1" >> "$LOG_FILE" 2>/dev/null || true
}
log_error() {
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${RED}[${timestamp}] ERROR:${NC} $1" >&2
echo "[${timestamp}] ERROR: $1" >> "$LOG_FILE" 2>/dev/null || true
}
# Copy log files for this restore run to the NAS logs directory
copy_logs_to_nas() {
if ! mountpoint -q "/mnt/share/media" 2>/dev/null; then
log_warning "NAS not mounted - skipping log copy to NAS"
return 1
fi
if [ ! -d "$NAS_LOG_DIR" ]; then
if ! mkdir -p "$NAS_LOG_DIR" 2>/dev/null; then
log_warning "Could not create NAS log directory: $NAS_LOG_DIR"
return 1
fi
fi
local copied=0
for log_file_path in "$LOG_FILE" "$MARKDOWN_LOG"; do
if [ -f "$log_file_path" ]; then
if cp "$log_file_path" "$NAS_LOG_DIR/" 2>/dev/null; then
log_info "Copied log to NAS: $NAS_LOG_DIR/$(basename "$log_file_path")"
copied=$((copied + 1))
else
log_warning "Failed to copy log to NAS: $log_file_path"
fi
fi
done
[ "$copied" -gt 0 ] && log_success "Copied $copied log file(s) to NAS: $NAS_LOG_DIR"
return 0
}
# Show usage
show_help() {
cat << EOF
Karakeep Restore Script
Usage: $0 <backup_directory>
$0 --latest
ARGUMENTS:
backup_directory Path to a timestamped backup directory produced by backup-karakeep.sh
--latest Automatically use the most recent backup in $LOCAL_BACKUP_BASE
-h, --help Show this help message
EXAMPLES:
$0 /home/acedanger/backups/karakeep/20260325_143000
$0 --latest
$0 /mnt/share/media/backups/karakeep/20260325_143000
$0 --compose-dir /home/user/docker/karakeep /mnt/share/media/backups/karakeep/20260325_143000
WHAT THIS SCRIPT DOES:
1. Stops all Karakeep containers
2. Clears existing volume data
3. Restores hoarder_data from backup archive
4. Restores hoarder_meilisearch from backup archive
5. Restarts all Karakeep containers
VOLUMES RESTORED:
- hoarder_data (Karakeep app data: bookmarks, assets, database)
- hoarder_meilisearch (Meilisearch search index)
OPTIONS:
--compose-dir DIR Override the path to the Karakeep docker-compose directory
(default: $COMPOSE_DIR)
EOF
}
# Parse arguments
BACKUP_DIR=""
while [[ $# -gt 0 ]]; do
case "$1" in
-h|--help)
show_help
exit 0
;;
--compose-dir)
if [[ -z "${2:-}" ]]; then
log_error "--compose-dir requires a path argument"
exit 1
fi
COMPOSE_DIR="$2"
shift 2
;;
--latest)
if [ ! -d "$LOCAL_BACKUP_BASE" ]; then
log_error "Local backup base directory not found: $LOCAL_BACKUP_BASE"
exit 1
fi
BACKUP_DIR=$(find "$LOCAL_BACKUP_BASE" -maxdepth 1 -mindepth 1 -type d | sort -r | head -n1)
if [ -z "$BACKUP_DIR" ]; then
log_error "No backups found in $LOCAL_BACKUP_BASE"
exit 1
fi
log_info "Auto-selected latest backup: $BACKUP_DIR"
shift
;;
"")
log_error "No backup directory specified."
show_help
exit 1
;;
*)
if [[ -z "$BACKUP_DIR" ]]; then
BACKUP_DIR="$1"
else
log_error "Unexpected argument: $1"
show_help
exit 1
fi
shift
;;
esac
done
if [[ -z "$BACKUP_DIR" ]]; then
log_error "No backup directory specified."
show_help
exit 1
fi
# Validate backup directory
if [ ! -d "$BACKUP_DIR" ]; then
log_error "Backup directory not found: $BACKUP_DIR"
exit 1
fi
BACKUP_DIR="$(realpath "$BACKUP_DIR")"
# Check that the backup contains expected archives
MISSING_ARCHIVES=()
for volume_name in "${!KARAKEEP_VOLUMES[@]}"; do
archive="${BACKUP_DIR}/${volume_name}.tar.gz"
if [ ! -f "$archive" ]; then
MISSING_ARCHIVES+=("$volume_name")
fi
done
if [ ${#MISSING_ARCHIVES[@]} -gt 0 ]; then
log_warning "The following volume archives are missing from the backup:"
for vol in "${MISSING_ARCHIVES[@]}"; do
log_warning " - ${vol}.tar.gz"
done
echo ""
echo -e "${YELLOW}Continuing will only restore the archives that are present.${NC}"
fi
# Confirm restore intent
echo ""
echo -e "${YELLOW}========================================================${NC}"
echo -e "${YELLOW} KARAKEEP RESTORE - DESTRUCTIVE OPERATION${NC}"
echo -e "${YELLOW}========================================================${NC}"
echo ""
echo -e " Backup source : ${CYAN}${BACKUP_DIR}${NC}"
echo -e " Compose dir : ${CYAN}${COMPOSE_DIR}${NC}"
echo ""
echo -e "${RED} WARNING: This will STOP all Karakeep containers and${NC}"
echo -e "${RED} ERASE all current volume data before restoring.${NC}"
echo -e "${RED} This action cannot be undone.${NC}"
echo ""
echo -n " Type 'yes' to continue: "
read -r confirmation
if [[ "$confirmation" != "yes" ]]; then
log_info "Restore cancelled by user."
exit 0
fi
echo ""
# Verify compose file exists
if [ ! -f "$COMPOSE_DIR/docker-compose.yml" ]; then
log_error "docker-compose.yml not found at $COMPOSE_DIR"
exit 1
fi
# Verify Docker is available
if ! docker info > /dev/null 2>&1; then
log_error "Docker is not running or not accessible"
exit 1
fi
CONTAINERS_RUNNING=false
RESTORE_START_TIME=$(date +%s)
log_message "=== KARAKEEP RESTORE STARTED ==="
log_message "Host: $(hostname)"
log_message "Restore Timestamp: $RESTORE_TIMESTAMP"
log_message "Backup Source: $BACKUP_DIR"
log_message "Compose Dir: $COMPOSE_DIR"
log_info "Log file: $LOG_FILE"
# Record restore parameters in markdown log
{
echo "## Restore Parameters"
echo "- **Backup Source**: $BACKUP_DIR"
echo "- **Compose Dir**: $COMPOSE_DIR"
echo ""
} >> "$MARKDOWN_LOG"
# Ensure containers are restarted on unexpected exit
cleanup_on_exit() {
if [[ "$CONTAINERS_RUNNING" == "false" ]]; then
log_warning "Attempting to restart Karakeep containers after unexpected exit..."
docker compose -f "$COMPOSE_DIR/docker-compose.yml" up -d >> "$LOG_FILE" 2>&1 || \
log_error "Failed to restart containers - manual intervention required"
fi
copy_logs_to_nas
}
trap cleanup_on_exit EXIT
# Step 1: Stop containers
log_message "Step 1/5: Stopping Karakeep containers..."
# Capture the exit status inline so a non-zero "down" neither aborts under set -e nor goes undetected
down_exit=0
down_output=$(docker compose --progress plain -f "$COMPOSE_DIR/docker-compose.yml" down 2>&1) || down_exit=$?
echo "$down_output" | tee -a "$LOG_FILE" > /dev/null
if [[ $down_exit -eq 0 ]]; then
log_success "Containers stopped and removed"
else
log_warning "docker compose down reported an error (exit $down_exit) - proceeding anyway"
fi
# Step 2: Ensure external volumes exist (create if absent)
log_message "Step 2/5: Ensuring Docker volumes exist..."
for volume_name in "${!KARAKEEP_VOLUMES[@]}"; do
if ! docker volume inspect "$volume_name" > /dev/null 2>&1; then
log_info "Creating missing volume: $volume_name"
docker volume create "$volume_name"
fi
log_info "Volume ready: $volume_name"
done
# Step 3: Clear existing volume data
log_message "Step 3/5: Clearing existing volume data..."
for volume_name in "${!KARAKEEP_VOLUMES[@]}"; do
mount_path="${KARAKEEP_VOLUMES[$volume_name]}"
archive="${BACKUP_DIR}/${volume_name}.tar.gz"
# Only clear volumes for which we have a backup to restore
if [ ! -f "$archive" ]; then
log_warning "Skipping clear of $volume_name - no archive found, keeping existing data"
continue
fi
log_info "Clearing volume: $volume_name"
# Check the docker run status via PIPESTATUS; tee would otherwise mask a failed clear
docker run --rm \
--volume "${volume_name}:${mount_path}" \
alpine \
find "${mount_path:?}" -mindepth 1 -delete 2>&1 | tee -a "$LOG_FILE"
if [[ ${PIPESTATUS[0]} -eq 0 ]]; then
log_success "Cleared volume: $volume_name"
else
log_warning "Could not fully clear $volume_name - restore may overlay existing data"
fi
done
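# Note (illustration only): clearing with "find <mount> -mindepth 1 -delete" rather than
# "rm -rf <mount>/*" also removes dotfiles the glob would miss, and ${mount_path:?} aborts
# the command if the mount path were ever empty. A hypothetical manual equivalent for a
# single volume:
#   docker run --rm -v hoarder_data:/data alpine find /data -mindepth 1 -delete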
# Step 4: Restore volumes from archives
log_message "Step 4/5: Restoring volume data from archives..."
RESTORE_SUCCESS=0
RESTORE_FAILED=0
for volume_name in "${!KARAKEEP_VOLUMES[@]}"; do
mount_path="${KARAKEEP_VOLUMES[$volume_name]}"
archive="${BACKUP_DIR}/${volume_name}.tar.gz"
if [ ! -f "$archive" ]; then
log_warning "Skipping restore of $volume_name - archive not found: $archive"
RESTORE_FAILED=$((RESTORE_FAILED + 1))
continue
fi
log_info "Verifying archive integrity: $archive"
if ! gzip -t "$archive" 2>/dev/null; then
log_error "Archive is corrupt or invalid: $archive"
RESTORE_FAILED=$((RESTORE_FAILED + 1))
continue
fi
log_info "Restoring volume $volume_name from $archive"
# Extract the archive into the volume using an Alpine helper container.
# The archive was created with the directory name (e.g. "data" or "meili_data")
# at the top level, so we extract into the parent of the mount path.
# Check the docker run status via PIPESTATUS; tee would otherwise mask a failed extraction
docker run --rm \
--volume "${volume_name}:${mount_path}" \
--volume "${archive}:/backup/${volume_name}.tar.gz:ro" \
alpine \
tar xzf "/backup/${volume_name}.tar.gz" -C "$(dirname "$mount_path")" 2>&1 | tee -a "$LOG_FILE"
if [[ ${PIPESTATUS[0]} -eq 0 ]]; then
log_success "Restored volume: $volume_name"
RESTORE_SUCCESS=$((RESTORE_SUCCESS + 1))
else
log_error "Failed to restore volume: $volume_name"
RESTORE_FAILED=$((RESTORE_FAILED + 1))
fi
done
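# Layout assumption (hypothetical example, not part of this script): backup-karakeep.sh is
# expected to archive each volume with its directory name at the top level, roughly:
#   docker run --rm -v hoarder_data:/data -v "$BACKUP_DIR":/backup alpine \
#     tar czf /backup/hoarder_data.tar.gz -C / data
# which is why extraction above uses -C "$(dirname "$mount_path")" (here, -C /) to recreate
# /data inside the restored volume.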
# Step 5: Start containers
log_message "Step 5/5: Starting Karakeep containers..."
CONTAINERS_RUNNING=true
up_exit=0
up_output=$(docker compose --progress plain -f "$COMPOSE_DIR/docker-compose.yml" up -d 2>&1) || up_exit=$?
echo "$up_output" | tee -a "$LOG_FILE" > /dev/null
if [[ $up_exit -eq 0 ]]; then
log_success "Karakeep containers started"
else
log_error "Failed to start Karakeep containers (exit $up_exit) - check docker compose logs"
CONTAINERS_RUNNING=false
exit 1
fi
# Remove the trap since we handled startup cleanly
trap - EXIT
# Calculate total restore time
RESTORE_END_TIME=$(date +%s)
RESTORE_TOTAL_TIME=$((RESTORE_END_TIME - RESTORE_START_TIME))
# Write markdown summary
{
echo "## Restore Results"
echo "- **Volumes Restored**: $RESTORE_SUCCESS"
echo "- **Volumes Failed**: $RESTORE_FAILED"
echo "- **Duration**: ${RESTORE_TOTAL_TIME}s"
echo "- **Completed**: $(date '+%Y-%m-%d %H:%M:%S')"
echo ""
} >> "$MARKDOWN_LOG"
# Copy logs to NAS
copy_logs_to_nas || true  # non-fatal: the NAS may be unmounted
# Summary
echo ""
echo -e "${GREEN}========================================================${NC}"
if [ "$RESTORE_FAILED" -eq 0 ]; then
echo -e "${GREEN} KARAKEEP RESTORE COMPLETE${NC}"
else
echo -e "${YELLOW} KARAKEEP RESTORE COMPLETE (WITH WARNINGS)${NC}"
fi
echo -e "${GREEN}========================================================${NC}"
echo ""
echo -e " Volumes restored : ${GREEN}${RESTORE_SUCCESS}${NC}"
echo -e " Volumes failed : ${RED}${RESTORE_FAILED}${NC}"
echo -e " Backup source : ${CYAN}${BACKUP_DIR}${NC}"
echo -e " Duration : ${RESTORE_TOTAL_TIME}s"
echo -e " Log file : ${CYAN}${LOG_FILE}${NC}"
echo -e " Markdown report : ${CYAN}${MARKDOWN_LOG}${NC}"
echo ""
if [ "$RESTORE_FAILED" -gt 0 ]; then
log_warning "Some volumes could not be restored. Review the output above."
log_warning "Log file: $LOG_FILE"
exit 1
fi
log_success "Karakeep has been fully restored from: $BACKUP_DIR"
log_message "Log file: $LOG_FILE"
log_message "Markdown report: $MARKDOWN_LOG"
exit 0