mirror of
https://github.com/acedanger/shell.git
synced 2026-03-27 06:16:09 -07:00
869 lines
27 KiB
Bash
Executable File
869 lines
27 KiB
Bash
Executable File
#!/bin/bash
#
# Karakeep (Hoarder) backup script: archives the service's named Docker
# volumes to timestamped local directories, optionally mirrors them to a
# NAS share, verifies the archives, prunes old backups per the retention
# policy, and sends webhook notifications.

# Abort on the first unhandled command failure.
set -e

# Load the unified backup metrics library
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
LIB_DIR="$SCRIPT_DIR/lib"
if [[ -f "$LIB_DIR/unified-backup-metrics.sh" ]]; then
    # shellcheck source=lib/unified-backup-metrics.sh
    source "$LIB_DIR/unified-backup-metrics.sh"
    METRICS_ENABLED=true
else
    # Metrics are optional: the backup still runs; metrics_* calls are
    # guarded by METRICS_ENABLED throughout.
    echo "Warning: Unified backup metrics library not found at $LIB_DIR/unified-backup-metrics.sh"
    METRICS_ENABLED=false
fi

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Configuration
MAX_BACKUP_AGE_DAYS=30   # age-based retention limit (days)
MAX_BACKUPS_TO_KEEP=10   # count-based retention limit
COMPOSE_DIR="/home/acedanger/docker/karakeep"
BACKUP_ROOT="/mnt/share/media/backups/karakeep"      # NAS destination root
LOCAL_BACKUP_DIR="/home/acedanger/backups/karakeep"  # local destination root
LOG_ROOT="${SCRIPT_DIR}/logs"
# NOTE(review): the two JSON logs spell out "${SCRIPT_DIR}/logs" instead of
# reusing LOG_ROOT — same path today, keep in sync if LOG_ROOT changes.
JSON_LOG_FILE="${SCRIPT_DIR}/logs/karakeep-backup.json"
PERFORMANCE_LOG_FILE="${SCRIPT_DIR}/logs/karakeep-backup-performance.json"

NAS_LOG_DIR="/mnt/share/media/backups/logs"

# Volume configuration: volume_name -> mount_path inside container
declare -A KARAKEEP_VOLUMES=(
    ["hoarder_data"]="/data"
    ["hoarder_meilisearch"]="/meili_data"
)

# Script options (defaults; overridable via the command-line flags parsed below)
VERIFY_BACKUPS=true
PERFORMANCE_MONITORING=true
WEBHOOK_URL="https://notify.peterwood.rocks/lab"
INTERACTIVE_MODE=false
DRY_RUN=false
STOP_CONTAINERS=true # Stop containers before backup for consistency
SKIP_NAS=false
|
|
|
# show help function
|
|
# Print CLI usage to stdout; used by -h/--help and when an unknown option
# is seen.  The heredoc expands $0 and $COMPOSE_DIR at call time.
show_help() {
cat << EOF
Karakeep Services Backup Script

Usage: $0 [OPTIONS]

OPTIONS:
    --dry-run       Show what would be backed up without actually doing it
    --no-verify     Skip backup verification
    --no-stop       Do hot backup without stopping containers (less safe)
    --no-nas        Skip copying to NAS, keep local backups only
    --interactive   Ask for confirmation before each backup
    --webhook URL   Custom webhook URL for notifications
    -h, --help      Show this help message

EXAMPLES:
    $0              # Run full backup with container stop/start
    $0 --dry-run    # Preview what would be backed up
    $0 --no-stop    # Hot backup without stopping containers
    $0 --no-nas     # Local backup only (skip NAS copy)
    $0 --no-verify  # Skip verification for faster backup

VOLUMES BACKED UP:
    - hoarder_data (Karakeep app data: bookmarks, assets, database)
    - hoarder_meilisearch (Meilisearch search index)

COMPOSE DIRECTORY:
    $COMPOSE_DIR

EOF
}
|
|
|
|
# Parse command line arguments
|
|
# Parse command line arguments.
# Sets the global option flags (DRY_RUN, VERIFY_BACKUPS, STOP_CONTAINERS,
# SKIP_NAS, INTERACTIVE_MODE, WEBHOOK_URL); exits 0 on -h/--help and 1 on
# an unrecognized option or a missing option argument.  Globals are
# assigned without 'local' on purpose.
parse_args() {
    while [[ $# -gt 0 ]]; do
        case $1 in
            --dry-run)
                DRY_RUN=true
                shift
                ;;
            --no-verify)
                VERIFY_BACKUPS=false
                shift
                ;;
            --no-stop)
                STOP_CONTAINERS=false
                shift
                ;;
            --no-nas)
                SKIP_NAS=true
                shift
                ;;
            --interactive)
                INTERACTIVE_MODE=true
                shift
                ;;
            --webhook)
                # Guard against a missing URL argument: under 'set -e' a
                # bare 'shift 2' would abort the script with no diagnostic.
                if [[ $# -lt 2 ]]; then
                    echo "Error: --webhook requires a URL argument" >&2
                    exit 1
                fi
                WEBHOOK_URL="$2"
                shift 2
                ;;
            -h|--help)
                show_help
                exit 0
                ;;
            *)
                # Diagnostics belong on stderr, not stdout.
                echo "Unknown option: $1" >&2
                show_help
                exit 1
                ;;
        esac
    done
}

parse_args "$@"
# Match the original post-parse state: all positional parameters consumed.
set --
|
|
|
|
# Timestamp for this backup run (YYYYmmdd_HHMMSS); names the per-run
# backup directory and both log files below.
BACKUP_TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_DEST="${LOCAL_BACKUP_DIR}/${BACKUP_TIMESTAMP}"

# Create necessary directories
mkdir -p "${LOG_ROOT}"
mkdir -p "${LOCAL_BACKUP_DIR}"

# Log files: plain-text console log plus a markdown report, both unique
# per run (cleaned up by cleanup_old_backups after MAX_BACKUP_AGE_DAYS).
LOG_FILE="${LOG_ROOT}/karakeep-backup-${BACKUP_TIMESTAMP}.log"
MARKDOWN_LOG="${LOG_ROOT}/karakeep-backup-${BACKUP_TIMESTAMP}.md"
|
|
|
# Logging functions
|
|
# Emit a timestamped message to stdout (cyan timestamp) and append a
# plain copy to LOG_FILE; file errors are swallowed so logging can never
# abort the backup.
log_message() {
    local msg="$1"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    printf '%b\n' "${CYAN}[${ts}]${NC} ${msg}"
    printf '[%s] %s\n' "$ts" "$msg" >> "${LOG_FILE}" 2>/dev/null || true
}
|
|
|
|
# Emit a timestamped ERROR message to stderr (red tag) and append a plain
# copy to LOG_FILE; file errors are swallowed.
log_error() {
    local msg="$1"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    printf '%b\n' "${RED}[${ts}] ERROR:${NC} ${msg}" >&2
    printf '[%s] ERROR: %s\n' "$ts" "$msg" >> "${LOG_FILE}" 2>/dev/null || true
}
|
|
|
|
# Emit a timestamped SUCCESS message to stdout (green tag) and append a
# plain copy to LOG_FILE; file errors are swallowed.
log_success() {
    local msg="$1"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    printf '%b\n' "${GREEN}[${ts}] SUCCESS:${NC} ${msg}"
    printf '[%s] SUCCESS: %s\n' "$ts" "$msg" >> "${LOG_FILE}" 2>/dev/null || true
}
|
|
|
|
# Emit a timestamped WARNING message to stdout (yellow tag) and append a
# plain copy to LOG_FILE; file errors are swallowed.
log_warning() {
    local msg="$1"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    printf '%b\n' "${YELLOW}[${ts}] WARNING:${NC} ${msg}"
    printf '[%s] WARNING: %s\n' "$ts" "$msg" >> "${LOG_FILE}" 2>/dev/null || true
}
|
|
|
|
# Emit a timestamped INFO message to stdout (blue tag) and append a plain
# copy to LOG_FILE; file errors are swallowed.
log_info() {
    local msg="$1"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    printf '%b\n' "${BLUE}[${ts}] INFO:${NC} ${msg}"
    printf '[%s] INFO: %s\n' "$ts" "$msg" >> "${LOG_FILE}" 2>/dev/null || true
}
|
|
|
|
# Performance tracking
|
|
# Record how long an operation took: always logged via log_info, and also
# appended as a JSON entry to PERFORMANCE_LOG_FILE when jq is available.
# Arguments:
#   $1 - operation name
#   $2 - start epoch seconds
#   $3 - end epoch seconds (optional; defaults to now)
track_performance() {
    if [ "$PERFORMANCE_MONITORING" != true ]; then
        return 0
    fi

    local operation="$1"
    local start_time="$2"
    local end_time="${3:-$(date +%s)}"
    local duration=$((end_time - start_time))

    if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then
        echo "[]" > "$PERFORMANCE_LOG_FILE"
    fi

    if command -v jq > /dev/null 2>&1; then
        local entry
        entry=$(jq -n \
            --arg timestamp "$(date -Iseconds)" \
            --arg operation "$operation" \
            --arg duration "$duration" \
            --arg hostname "$(hostname)" \
            '{
                timestamp: $timestamp,
                operation: $operation,
                duration: ($duration | tonumber),
                hostname: $hostname
            }')

        # Serialize concurrent writers via O_EXCL (set -C) lock-file
        # creation; spin up to ~1s.
        local lock_file="${PERFORMANCE_LOG_FILE}.lock"
        local max_wait=10
        local wait_count=0

        while [ $wait_count -lt $max_wait ]; do
            if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
                break
            fi
            sleep 0.1
            # Fix: '((wait_count++))' returns status 1 when the old value
            # is 0, which aborts the whole script under 'set -e' the first
            # time the lock is contended.  Plain assignment is safe.
            wait_count=$((wait_count + 1))
        done

        if [ $wait_count -lt $max_wait ]; then
            # Write-then-rename keeps the JSON valid if jq fails mid-way.
            if jq --argjson entry "$entry" '. += [$entry]' "$PERFORMANCE_LOG_FILE" > "${PERFORMANCE_LOG_FILE}.tmp" 2>/dev/null; then
                mv "${PERFORMANCE_LOG_FILE}.tmp" "$PERFORMANCE_LOG_FILE"
            else
                rm -f "${PERFORMANCE_LOG_FILE}.tmp"
            fi
            rm -f "$lock_file"
        fi
        # On lock timeout the entry is dropped (perf data is best-effort);
        # the lock file was never created by us, so nothing to clean up.
    fi

    log_info "Performance: $operation completed in ${duration}s"
}
|
|
|
|
# Initialize JSON log file
|
|
# Ensure JSON_LOG_FILE exists and contains valid JSON; reset it to an
# empty object otherwise (also resets when jq is unavailable to validate).
initialize_json_log() {
    local valid=false
    if [ -f "${JSON_LOG_FILE}" ] && jq empty "${JSON_LOG_FILE}" 2>/dev/null; then
        valid=true
    fi
    if [ "$valid" != true ]; then
        echo "{}" > "${JSON_LOG_FILE}"
        log_message "Initialized JSON log file"
    fi
}
|
|
|
|
# Log backup details with markdown formatting
|
|
# Record the outcome of one volume backup in the markdown report and
# (when jq exists) the JSON log.
# Arguments:
#   $1 - volume name
#   $2 - destination archive path
#   $3 - status string ("SUCCESS", "FAILED", "DRY RUN", ...)
log_file_details() {
    local volume="$1"
    local dest="$2"
    local status="$3"
    local size=""
    local checksum=""

    # Size/checksum only make sense for a successful, existing archive.
    if [ "$status" == "SUCCESS" ] && [ -e "$dest" ]; then
        size=$(du -sh "$dest" 2>/dev/null | cut -f1 || echo "Unknown")
        if [ "$VERIFY_BACKUPS" == true ]; then
            checksum=$(md5sum "$dest" 2>/dev/null | cut -d' ' -f1 || echo "N/A")
        fi
    else
        size="N/A"
        checksum="N/A"
    fi

    # Serialize concurrent writers of the markdown report via O_EXCL
    # lock-file creation; spin up to ~3s.
    local markdown_lock="${MARKDOWN_LOG}.lock"
    local max_wait=30
    local wait_count=0

    while [ $wait_count -lt $max_wait ]; do
        if (set -C; echo $$ > "$markdown_lock") 2>/dev/null; then
            break
        fi
        sleep 0.1
        # Fix: '((wait_count++))' returns status 1 when the old value is
        # 0, which aborts the whole script under 'set -e' on the first
        # contended attempt.  Plain assignment is safe.
        wait_count=$((wait_count + 1))
    done

    if [ $wait_count -lt $max_wait ]; then
        {
            echo "## Volume: $volume"
            echo "- **Status**: $status"
            echo "- **Destination**: $dest"
            echo "- **Size**: $size"
            echo "- **Checksum**: $checksum"
            echo "- **Timestamp**: $(date '+%Y-%m-%d %H:%M:%S')"
            echo ""
        } >> "$MARKDOWN_LOG"
        rm -f "$markdown_lock"
    else
        log_warning "Could not acquire markdown log lock for $volume"
    fi

    if command -v jq > /dev/null 2>&1; then
        update_backup_log "$volume" "$dest" "$status" "$size" "$checksum"
    fi
}
|
|
|
|
# Update backup log in JSON format
|
|
# Upsert one volume's latest backup result into the JSON log, keyed by
# volume name.  Silently a no-op when jq is unavailable.
# Arguments: volume, dest, status, size, checksum
# Returns 1 if the log lock could not be acquired.
update_backup_log() {
    local volume="$1"
    local dest="$2"
    local status="$3"
    local size="$4"
    local checksum="$5"
    local timestamp
    timestamp=$(date -Iseconds)

    if ! command -v jq > /dev/null 2>&1; then
        return 0
    fi

    # Serialize writers via O_EXCL lock-file creation; spin up to ~3s.
    local lock_file="${JSON_LOG_FILE}.lock"
    local max_wait=30
    local wait_count=0

    while [ $wait_count -lt $max_wait ]; do
        if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
            break
        fi
        sleep 0.1
        # Fix: '((wait_count++))' returns status 1 when the old value is
        # 0, which aborts the whole script under 'set -e' on the first
        # contended attempt.  Plain assignment is safe.
        wait_count=$((wait_count + 1))
    done

    if [ $wait_count -ge $max_wait ]; then
        log_warning "Could not acquire lock for JSON log update"
        return 1
    fi

    local entry
    entry=$(jq -n \
        --arg volume "$volume" \
        --arg dest "$dest" \
        --arg status "$status" \
        --arg size "$size" \
        --arg checksum "$checksum" \
        --arg timestamp "$timestamp" \
        '{
            volume: $volume,
            destination: $dest,
            status: $status,
            size: $size,
            checksum: $checksum,
            timestamp: $timestamp
        }')

    # Write-then-rename keeps the JSON log valid even if jq fails mid-way.
    if jq --argjson entry "$entry" --arg volume "$volume" \
        '.[$volume] = $entry' "$JSON_LOG_FILE" > "${JSON_LOG_FILE}.tmp" 2>/dev/null; then
        mv "${JSON_LOG_FILE}.tmp" "$JSON_LOG_FILE"
    else
        rm -f "${JSON_LOG_FILE}.tmp"
    fi

    rm -f "$lock_file"
}
|
|
|
|
# Check if NAS mount is accessible
|
|
# Verify the NAS share is mounted and that the parent of BACKUP_ROOT is
# writable.  Returns 0 when usable; otherwise logs a warning and returns 1.
check_nas_mount() {
    local mount_point="/mnt/share/media"

    if ! mountpoint -q "$mount_point"; then
        log_warning "NAS not mounted at $mount_point - backups will be local only"
        return 1
    fi

    local parent_dir
    parent_dir=$(dirname "$BACKUP_ROOT")
    if [ ! -w "$parent_dir" ]; then
        log_warning "No write access to NAS backup path: $BACKUP_ROOT"
        return 1
    fi

    log_success "NAS mount check passed: $mount_point is accessible"
    return 0
}
|
|
|
|
# Verify backup archive integrity
|
|
# Validate a backup archive: it must exist, be non-empty, and pass a full
# gzip CRC check.  Returns 0 immediately when verification is disabled.
# Arguments: volume name (for logging), archive path.
verify_backup() {
    local volume="$1"
    local archive="$2"

    [ "$VERIFY_BACKUPS" == true ] || return 0

    log_info "Verifying backup archive: $archive"

    if [ ! -f "$archive" ]; then
        log_error "Backup archive not found: $archive"
        return 1
    fi

    local file_size
    file_size=$(stat -c%s "$archive" 2>/dev/null || echo "0")
    if [ "$file_size" -eq 0 ]; then
        log_error "Backup archive is empty: $archive"
        return 1
    fi

    # gzip -t reads the entire stream and checks its CRC; preferred over
    # 'tar -tzf', which can exit non-zero on benign tar warnings.
    if gzip -t "$archive" 2>/dev/null; then
        log_success "Backup verification passed for $volume (${file_size} bytes, gzip integrity OK)"
        return 0
    fi

    log_error "Backup archive failed integrity check: $archive"
    return 1
}
|
|
|
|
# Check disk space at backup destination
|
|
# Ensure the filesystem holding a destination has at least the required
# free space.  Arguments: destination path, required MB (default 500).
# Returns 1 (with an error logged) when space is short.
check_disk_space() {
    local destination="$1"
    local required_space_mb="${2:-500}"
    local parent
    parent=$(dirname "$destination")

    local available_space_kb
    available_space_kb=$(df "$parent" 2>/dev/null | awk 'NR==2 {print $4}' || echo "0")
    local available_space_mb=$((available_space_kb / 1024))

    if [ "$available_space_mb" -ge "$required_space_mb" ]; then
        log_info "Disk space check passed at $parent. Available: ${available_space_mb}MB"
        return 0
    fi

    log_error "Insufficient disk space at $parent. Available: ${available_space_mb}MB, Required: ${required_space_mb}MB"
    return 1
}
|
|
|
|
# Check if Docker volume exists
|
|
# Return 0 when the named Docker volume exists; otherwise log an error
# and return 1.
check_volume_exists() {
    local volume_name="$1"

    if docker volume inspect "$volume_name" > /dev/null 2>&1; then
        return 0
    fi

    log_error "Docker volume '$volume_name' not found"
    return 1
}
|
|
|
|
# Backup a single named Docker volume to a tar.gz archive
|
|
# Back up one named Docker volume to ${BACKUP_DEST}/<volume>.tar.gz.
# Honors DRY_RUN (plan only) and INTERACTIVE_MODE (per-volume prompt);
# records the result via log_file_details and, when METRICS_ENABLED,
# metrics_add_file.  Returns 0 on success/skip, 1 on any failure.
backup_volume() {
    local volume_name="$1"
    local mount_path="${KARAKEEP_VOLUMES[$volume_name]}"
    local archive="${BACKUP_DEST}/${volume_name}.tar.gz"
    local backup_start_time
    backup_start_time=$(date +%s)

    log_message "Starting backup for volume: $volume_name (${mount_path})"

    # Dry run: report the plan, write nothing.
    if [ "$DRY_RUN" == true ]; then
        log_info "DRY RUN: Would backup volume $volume_name -> $archive"
        log_file_details "$volume_name" "$archive" "DRY RUN"
        return 0
    fi

    # Interactive mode: let the operator skip individual volumes.
    if [ "$INTERACTIVE_MODE" == true ]; then
        echo -n "Backup volume $volume_name? (y/N): "
        read -r response
        if [[ ! "$response" =~ ^[Yy]$ ]]; then
            log_info "Skipping $volume_name backup (user choice)"
            return 0
        fi
    fi

    # Confirm volume exists
    if ! check_volume_exists "$volume_name"; then
        log_file_details "$volume_name" "$archive" "FAILED - Volume not found"
        return 1
    fi

    # Create destination directory
    mkdir -p "$BACKUP_DEST"

    log_info "Archiving volume $volume_name to $archive"

    # Use a minimal Alpine container to tar up the volume contents.
    # The volume is mounted read-only and the tar stream is written on the
    # host side of the pipe, so nothing persists inside the container.
    if docker run --rm \
        --volume "${volume_name}:${mount_path}:ro" \
        alpine \
        tar czf - -C "$(dirname "$mount_path")" "$(basename "$mount_path")" \
        > "$archive" 2>>"$LOG_FILE"; then

        log_success "Volume archive created: $archive"

        # File-level metrics tracking
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            local file_size
            file_size=$(stat -c%s "$archive" 2>/dev/null || echo "0")
            local checksum
            checksum=$(md5sum "$archive" 2>/dev/null | cut -d' ' -f1 || echo "")
            metrics_add_file "$archive" "success" "$file_size" "$checksum"
        fi

        # Verification result decides this volume's overall status.
        if verify_backup "$volume_name" "$archive"; then
            log_file_details "$volume_name" "$archive" "SUCCESS"
            track_performance "backup_${volume_name}" "$backup_start_time"
            return 0
        else
            log_file_details "$volume_name" "$archive" "VERIFICATION_FAILED"
            if [[ "$METRICS_ENABLED" == "true" ]]; then
                local file_size
                file_size=$(stat -c%s "$archive" 2>/dev/null || echo "0")
                metrics_add_file "$archive" "failed" "$file_size" "" "Verification failed"
            fi
            return 1
        fi
    else
        # Archive creation failed: delete the partial file so it can never
        # be mistaken for a good backup.
        log_error "Failed to archive volume: $volume_name"
        rm -f "$archive"
        log_file_details "$volume_name" "$archive" "FAILED"
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_add_file "$archive" "failed" "0" "" "Archive creation failed"
        fi
        return 1
    fi
}
|
|
|
|
# Stop Karakeep containers before backup
|
|
# Stop the Karakeep compose stack so volume archives are consistent.
# Compose output is appended to LOG_FILE.  Returns 1 when the compose
# file is missing or the stop command fails.
stop_containers() {
    log_message "Stopping Karakeep containers for consistent backup..."

    local compose_file="$COMPOSE_DIR/docker-compose.yml"
    if [ ! -f "$compose_file" ]; then
        log_error "docker-compose.yml not found at $COMPOSE_DIR"
        return 1
    fi

    local compose_output
    if compose_output=$(docker compose -f "$compose_file" --progress plain stop 2>&1); then
        echo "$compose_output" | tee -a "$LOG_FILE" > /dev/null
        log_success "Karakeep containers stopped"
        return 0
    fi

    echo "$compose_output" | tee -a "$LOG_FILE" > /dev/null
    log_error "Failed to stop Karakeep containers"
    return 1
}
|
|
|
|
# Start Karakeep containers after backup
|
|
# Start the Karakeep compose stack again after the backup.  Compose
# output is appended to LOG_FILE.  Returns 1 when the start fails (the
# caller escalates — a stopped stack needs manual attention).
start_containers() {
    log_message "Starting Karakeep containers..."

    local compose_output
    if compose_output=$(docker compose -f "$COMPOSE_DIR/docker-compose.yml" --progress plain start 2>&1); then
        echo "$compose_output" | tee -a "$LOG_FILE" > /dev/null
        log_success "Karakeep containers started"
        return 0
    fi

    echo "$compose_output" | tee -a "$LOG_FILE" > /dev/null
    log_error "Failed to start Karakeep containers - manual intervention required"
    return 1
}
|
|
|
|
# Copy backup to NAS
|
|
# Copy a finished local backup directory to the NAS backup root.
# Argument: source directory.  cp errors are appended to LOG_FILE.
copy_to_nas() {
    local src="$1"
    local nas_dest="${BACKUP_ROOT}/$(basename "$src")"

    log_info "Copying backup to NAS: $nas_dest"
    mkdir -p "$BACKUP_ROOT"

    if ! cp -r "$src" "$nas_dest" 2>>"$LOG_FILE"; then
        log_error "Failed to copy backup to NAS"
        return 1
    fi

    log_success "Backup copied to NAS: $nas_dest"
    return 0
}
|
|
|
|
# Copy log files for this run to the NAS logs directory
|
|
# Copy this run's .log and .md files to the NAS log directory.
# Best-effort: returns 0 when NAS use is disabled, 1 when the NAS is
# unmounted or its log directory cannot be created; per-file copy
# failures only produce warnings.
copy_logs_to_nas() {
    if [ "$SKIP_NAS" == true ]; then
        return 0
    fi

    if ! mountpoint -q "/mnt/share/media" 2>/dev/null; then
        log_warning "NAS not mounted - skipping log copy to NAS"
        return 1
    fi

    if [ ! -d "$NAS_LOG_DIR" ] && ! mkdir -p "$NAS_LOG_DIR" 2>/dev/null; then
        log_warning "Could not create NAS log directory: $NAS_LOG_DIR"
        return 1
    fi

    local copied=0
    local log_file
    for log_file in "${LOG_ROOT}/karakeep-backup-${BACKUP_TIMESTAMP}.log" \
                    "${LOG_ROOT}/karakeep-backup-${BACKUP_TIMESTAMP}.md"; do
        [ -f "$log_file" ] || continue
        if cp "$log_file" "$NAS_LOG_DIR/" 2>/dev/null; then
            log_info "Copied log to NAS: $NAS_LOG_DIR/$(basename "$log_file")"
            copied=$((copied + 1))
        else
            log_warning "Failed to copy log to NAS: $log_file"
        fi
    done

    [ "$copied" -gt 0 ] && log_success "Copied $copied log file(s) to NAS: $NAS_LOG_DIR"
    return 0
}
|
|
|
|
# Clean up old backups
|
|
# Apply the retention policy: keep at most MAX_BACKUPS_TO_KEEP
# timestamped backup directories and delete anything older than
# MAX_BACKUP_AGE_DAYS, both locally and (when reachable) on the NAS;
# stale log files are pruned the same way.  All deletions are
# best-effort (errors suppressed).
cleanup_old_backups() {
    log_message "Cleaning up old backups..."

    # Keep only the MAX_BACKUPS_TO_KEEP most recent timestamped dirs.
    # NUL-delimited pipeline so paths with whitespace survive intact;
    # 'xargs -r' prevents running 'rm -rf' with no arguments when there
    # is nothing to delete.
    find "$LOCAL_BACKUP_DIR" -maxdepth 1 -mindepth 1 -type d -print0 | sort -rz | \
        tail -zn +$((MAX_BACKUPS_TO_KEEP + 1)) | xargs -0 -r rm -rf 2>/dev/null || true

    # Drop local backups past the age limit regardless of count.
    find "$LOCAL_BACKUP_DIR" -maxdepth 1 -mindepth 1 -type d -mtime +"${MAX_BACKUP_AGE_DAYS}" -print0 | \
        xargs -0 -r rm -rf 2>/dev/null || true

    # Same policy on the NAS.  SKIP_NAS is checked first so --no-nas runs
    # do not probe (and log about) the mount at all.
    if [ "$SKIP_NAS" != true ] && check_nas_mount; then
        find "$BACKUP_ROOT" -maxdepth 1 -mindepth 1 -type d -print0 | sort -rz | \
            tail -zn +$((MAX_BACKUPS_TO_KEEP + 1)) | xargs -0 -r rm -rf 2>/dev/null || true

        find "$BACKUP_ROOT" -maxdepth 1 -mindepth 1 -type d -mtime +"${MAX_BACKUP_AGE_DAYS}" -print0 | \
            xargs -0 -r rm -rf 2>/dev/null || true

        # Clean old NAS karakeep logs
        find "$NAS_LOG_DIR" -maxdepth 1 -name "karakeep-backup-*.log" -mtime +"${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true
        find "$NAS_LOG_DIR" -maxdepth 1 -name "karakeep-backup-*.md" -mtime +"${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true
    fi

    # Clean up old local log files
    find "$LOG_ROOT" -name "karakeep-backup-*.log" -mtime +"${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true
    find "$LOG_ROOT" -name "karakeep-backup-*.md" -mtime +"${MAX_BACKUP_AGE_DAYS}" -delete 2>/dev/null || true

    log_success "Cleanup completed"
}
|
|
|
|
# Send notification
|
|
# Log a status line and, unless this is a dry run or no webhook is
# configured, POST a summary of the run to the webhook.
# Arguments: title, message, status ("success"/"error"/"warning"/info),
# success count (default 0), failed count (default 0).
send_notification() {
    local title="$1"
    local message="$2"
    local status="${3:-info}"
    local success_count="${4:-0}"
    local failed_count="${5:-0}"
    local hostname
    hostname=$(hostname)

    # Multi-line body for the webhook with the run's headline numbers.
    local enhanced_message
    printf -v enhanced_message "%s\n\nVolumes: %d\nSuccessful: %d\nFailed: %d\nHost: %s\nBackup: %s" \
        "$message" "${#KARAKEEP_VOLUMES[@]}" "$success_count" "$failed_count" "$hostname" "$BACKUP_DEST"

    case "$status" in
        "success") log_success "$title: $message" ;;
        "error")   log_error "$title: $message" ;;
        "warning") log_warning "$title: $message" ;;
        *)         log_info "$title: $message" ;;
    esac

    if [ -z "$WEBHOOK_URL" ] || [ "$DRY_RUN" == true ]; then
        return 0
    fi

    local tags="backup,karakeep,${hostname}"
    if [ "$failed_count" -gt 0 ]; then
        tags="${tags},errors"
    fi

    curl -s \
        -H "tags:${tags}" \
        -d "$enhanced_message" \
        "$WEBHOOK_URL" 2>/dev/null || log_warning "Failed to send webhook notification"
}
|
|
|
|
# Generate backup summary report
|
|
# Emit the end-of-run summary to the console log and append a summary
# section to the markdown report.
# Arguments: success count, failed count, total elapsed seconds.
generate_summary_report() {
    local success_count="$1"
    local failed_count="$2"
    local total_time="$3"

    log_message "=== BACKUP SUMMARY REPORT ==="
    log_message "Total Volumes: ${#KARAKEEP_VOLUMES[@]}"
    log_message "Successful Backups: $success_count"
    log_message "Failed Backups: $failed_count"
    log_message "Total Time: ${total_time}s"
    log_message "Backup Directory: $BACKUP_DEST"
    log_message "Log File: $LOG_FILE"
    log_message "Markdown Report: $MARKDOWN_LOG"

    {
        printf '# Karakeep Backup Summary Report\n'
        printf '**Date**: %s\n' "$(date '+%Y-%m-%d %H:%M:%S')"
        printf '**Host**: %s\n' "$(hostname)"
        printf '**Total Volumes**: %s\n' "${#KARAKEEP_VOLUMES[@]}"
        printf '**Successful**: %s\n' "$success_count"
        printf '**Failed**: %s\n' "$failed_count"
        printf '**Duration**: %ss\n' "$total_time"
        printf '**Backup Directory**: %s\n' "$BACKUP_DEST"
        printf '\n'
    } >> "$MARKDOWN_LOG"
}
|
|
|
|
# Main backup execution
|
|
# Orchestrate the whole backup run: pre-flight checks, optional container
# stop/start, per-volume archiving, NAS copy, retention cleanup, report
# generation and notifications.  Exits 1 when any volume backup failed.
main() {
    local script_start_time
    script_start_time=$(date +%s)
    local containers_stopped=false

    log_message "=== KARAKEEP BACKUP STARTED ==="
    log_message "Host: $(hostname)"
    log_message "Timestamp: $BACKUP_TIMESTAMP"
    log_message "Dry Run: $DRY_RUN"
    log_message "Stop Containers: $STOP_CONTAINERS"
    log_message "Verify Backups: $VERIFY_BACKUPS"
    log_message "Backup Destination: $BACKUP_DEST"

    # Initialize metrics if enabled
    if [[ "$METRICS_ENABLED" == "true" ]]; then
        metrics_backup_start "karakeep" "Karakeep volume backup (hoarder_data, hoarder_meilisearch)" "$LOCAL_BACKUP_DIR"
        metrics_status_update "initializing" "Preparing Karakeep backup"
    fi

    # Initialize logging
    initialize_json_log

    # Start the per-run markdown report (truncates: '>' not '>>').
    {
        echo "# Karakeep Backup Report"
        echo "**Started**: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "**Host**: $(hostname)"
        echo "**Backup Timestamp**: $BACKUP_TIMESTAMP"
        echo ""
    } > "$MARKDOWN_LOG"

    # Pre-flight: Docker available?
    if ! docker info > /dev/null 2>&1; then
        log_error "Docker is not running or not accessible"
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_backup_complete "failed" "Docker is not accessible"
        fi
        send_notification "Karakeep Backup Failed" "Docker is not accessible" "error" 0 "${#KARAKEEP_VOLUMES[@]}"
        exit 1
    fi

    # Pre-flight: disk space check on local backup dir
    if [[ "$METRICS_ENABLED" == "true" ]]; then
        metrics_status_update "checking" "Running pre-flight checks"
    fi

    mkdir -p "$LOCAL_BACKUP_DIR"
    if ! check_disk_space "$LOCAL_BACKUP_DIR" 500; then
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_backup_complete "failed" "Insufficient local disk space"
        fi
        send_notification "Karakeep Backup Failed" "Insufficient local disk space" "error" 0 "${#KARAKEEP_VOLUMES[@]}"
        exit 1
    fi

    # Ensure containers are restarted on unexpected exit (set -e, signal).
    trap 'if [[ "$containers_stopped" == "true" ]]; then log_warning "Restarting containers after unexpected exit..."; start_containers || true; fi' EXIT INT TERM

    # Stop containers for a consistent snapshot; on failure fall back to
    # a hot backup rather than aborting the run.
    if [ "$STOP_CONTAINERS" == true ] && [ "$DRY_RUN" != true ]; then
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_status_update "backing_up" "Stopping containers for consistent backup"
        fi
        if ! stop_containers; then
            log_warning "Could not stop containers - proceeding with hot backup"
        else
            containers_stopped=true
        fi
    fi

    # Back up each volume
    if [[ "$METRICS_ENABLED" == "true" ]]; then
        metrics_status_update "backing_up" "Archiving Karakeep volumes"
    fi

    local success_count=0
    local failed_count=0
    local backup_results=()

    for volume_name in "${!KARAKEEP_VOLUMES[@]}"; do
        if backup_volume "$volume_name"; then
            success_count=$((success_count + 1))
            backup_results+=("✓ $volume_name")
        else
            failed_count=$((failed_count + 1))
            backup_results+=("✗ $volume_name")
        fi
    done

    # Restart containers as soon as volumes are archived (before the
    # slower NAS copy) to minimize service downtime.
    if [ "$containers_stopped" == true ]; then
        if ! start_containers; then
            log_error "CRITICAL: Failed to restart Karakeep containers after backup"
            send_notification "Karakeep Backup WARNING" "Containers failed to restart after backup - manual intervention required" "error" "$success_count" "$failed_count"
        fi
        containers_stopped=false
    fi

    # Copy to NAS if available and not skipped; NAS failures are only
    # warnings — the local copy remains authoritative.
    if [ "$SKIP_NAS" != true ] && [ "$DRY_RUN" != true ]; then
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_status_update "backing_up" "Copying backup to NAS"
        fi
        if check_nas_mount; then
            if ! copy_to_nas "$BACKUP_DEST"; then
                log_warning "NAS copy failed - local backup is still available at $BACKUP_DEST"
            fi
        else
            log_warning "NAS not available - backup retained locally at $BACKUP_DEST"
        fi
    fi

    # Calculate elapsed time
    local script_end_time
    script_end_time=$(date +%s)
    local total_time=$((script_end_time - script_start_time))

    track_performance "full_karakeep_backup" "$script_start_time" "$script_end_time"

    # Clean up old backups
    if [ "$DRY_RUN" != true ]; then
        if [[ "$METRICS_ENABLED" == "true" ]]; then
            metrics_status_update "cleaning_up" "Removing old backup archives"
        fi
        cleanup_old_backups
    fi

    # Generate summary
    generate_summary_report "$success_count" "$failed_count" "$total_time"

    {
        echo "## Backup Results"
        for result in "${backup_results[@]}"; do
            echo "- $result"
        done
        echo ""
        echo "**Completed**: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "**Duration**: ${total_time}s"
    } >> "$MARKDOWN_LOG"

    # Copy logs to NAS
    if [ "$DRY_RUN" != true ]; then
        copy_logs_to_nas
    fi

    # Send notification; status/message reflect dry-run or partial failure.
    local status="success"
    local message="Karakeep backup completed successfully (${success_count}/${#KARAKEEP_VOLUMES[@]} volumes)"

    if [ "$DRY_RUN" == true ]; then
        message="Karakeep backup dry run completed"
        status="info"
    elif [ "$failed_count" -gt 0 ]; then
        status="warning"
        message="Karakeep backup completed with $failed_count failure(s)"
    fi

    send_notification "Karakeep Backup Complete" "$message" "$status" "$success_count" "$failed_count"

    # Finalize metrics
    if [[ "$METRICS_ENABLED" == "true" ]]; then
        if [ "$failed_count" -gt 0 ]; then
            metrics_backup_complete "completed_with_errors" "Karakeep backup completed with $failed_count failure(s)"
        elif [ "$DRY_RUN" == true ]; then
            metrics_backup_complete "success" "Karakeep backup dry run completed"
        else
            metrics_backup_complete "success" "Karakeep backup completed successfully"
        fi
    fi

    # Non-zero exit when any volume failed so schedulers/monitors notice.
    if [ "$failed_count" -gt 0 ]; then
        exit 1
    fi

    log_success "All Karakeep volume backups completed successfully!"
    exit 0
}

main "$@"
|