Mirror of https://github.com/acedanger/shell.git (synced 2025-12-05 21:40:12 -08:00)
Refactor variable assignments and improve script readability in validate-plex-backups.sh and validate-plex-recovery.sh

- Changed inline variable assignments to separate declaration and assignment for clarity.
- Updated condition checks and log messages for better readability and consistency.
- Added a backup of validate-plex-recovery.sh for safety.
- Introduced a new script run-docker-tests.sh for testing setup in Docker containers.
- Enhanced ssh-login.sh to improve condition checks and logging functionality.
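Most of the diff below applies one ShellCheck fix (SC2155): `local name=$(command)` hides the command's exit status behind `local`, so declaration and assignment are split into two statements. A minimal sketch of the pattern, using a hypothetical helper rather than code from the repository:

```bash
#!/bin/bash
# Before: `local tmp=$(mktemp)` always returns the status of `local`, not of mktemp.
# After: declare first, assign second, so a failed mktemp is actually visible.
make_temp() {
    local tmp
    tmp=$(mktemp) || return 1
    echo "$tmp"
}

tmp_file=$(make_temp) && echo "created $tmp_file"
```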
@@ -2,7 +2,7 @@
 
 # vaultwarden
 docker stop vaultwarden
-tar zcf /home/acedanger/backup/docker-data/vaultwarden-data-bk-`date +%Y%m%d`.tar.gz /var/lib/docker/volumes/vaultwarden_data/_data
+tar zcf "/home/acedanger/backup/docker-data/vaultwarden-data-bk-$(date +%Y%m%d).tar.gz" /var/lib/docker/volumes/vaultwarden_data/_data
 docker start vaultwarden
 
 # paperless
@@ -14,12 +14,12 @@ docker start vaultwarden
 
 # uptime-kuma
 docker stop uptime-kuma
-tar zcf /home/acedanger/backup/docker-data/uptime-kuma-data-bk-`date +%Y%m%d`.tar.gz /var/lib/docker/volumes/uptime-kuma/_data
+tar zcf "/home/acedanger/backup/docker-data/uptime-kuma-data-bk-$(date +%Y%m%d).tar.gz" /var/lib/docker/volumes/uptime-kuma/_data
 docker start uptime-kuma
 
 # send a notification to https://notify.peterwood.rocks\lab
 curl \
 -H priority:default \
--H tags:backup,docker,vaultwarden,uptime-kuma,${HOSTNAME} \
+-H tags:backup,docker,vaultwarden,uptime-kuma,"${HOSTNAME}" \
 -d "Completed backup of vaultwarden, uptime-kuma" \
 https://notify.peterwood.rocks/lab
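The hunk above swaps backtick command substitution for `$(...)` and quotes the archive path so the date-stamped filename survives word splitting. The repeated stop/archive/start sequence could also be factored into a helper; a sketch, assuming the volumes live in the default Docker location:

```bash
#!/bin/bash
set -euo pipefail

BACKUP_ROOT="/home/acedanger/backup/docker-data"

# Stop a container, archive its named volume with a date-stamped name, restart it.
backup_volume() {
    local container="$1" volume="$2"
    local archive
    archive="${BACKUP_ROOT}/${container}-data-bk-$(date +%Y%m%d).tar.gz"
    docker stop "$container"
    tar zcf "$archive" "/var/lib/docker/volumes/${volume}/_data"
    docker start "$container"
}

backup_volume vaultwarden vaultwarden_data
backup_volume uptime-kuma uptime-kuma
```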
@@ -54,10 +54,10 @@ usage() {
|
|||||||
# Check dependencies
|
# Check dependencies
|
||||||
check_dependencies() {
|
check_dependencies() {
|
||||||
local missing_deps=()
|
local missing_deps=()
|
||||||
|
|
||||||
command -v git >/dev/null 2>&1 || missing_deps+=("git")
|
command -v git >/dev/null 2>&1 || missing_deps+=("git")
|
||||||
command -v find >/dev/null 2>&1 || missing_deps+=("find")
|
command -v find >/dev/null 2>&1 || missing_deps+=("find")
|
||||||
|
|
||||||
if [ ${#missing_deps[@]} -ne 0 ]; then
|
if [ ${#missing_deps[@]} -ne 0 ]; then
|
||||||
echo -e "${RED}Error: Missing required dependencies: ${missing_deps[*]}${NC}"
|
echo -e "${RED}Error: Missing required dependencies: ${missing_deps[*]}${NC}"
|
||||||
echo "Please install the missing dependencies and try again."
|
echo "Please install the missing dependencies and try again."
|
||||||
@@ -68,12 +68,12 @@ check_dependencies() {
|
|||||||
# Find all .env files in docker directories
|
# Find all .env files in docker directories
|
||||||
find_env_files() {
|
find_env_files() {
|
||||||
local base_dir="$1"
|
local base_dir="$1"
|
||||||
|
|
||||||
if [ ! -d "$base_dir" ]; then
|
if [ ! -d "$base_dir" ]; then
|
||||||
echo -e "${YELLOW}Warning: Docker directory $base_dir does not exist${NC}"
|
echo -e "${YELLOW}Warning: Docker directory $base_dir does not exist${NC}"
|
||||||
return 0
|
return 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Find all .env files, including hidden ones and those with different extensions
|
# Find all .env files, including hidden ones and those with different extensions
|
||||||
find "$base_dir" -type f \( -name "*.env" -o -name ".env*" -o -name "env.*" \) 2>/dev/null | sort
|
find "$base_dir" -type f \( -name "*.env" -o -name ".env*" -o -name "env.*" \) 2>/dev/null | sort
|
||||||
}
|
}
|
||||||
@@ -82,17 +82,20 @@ find_env_files() {
|
|||||||
list_env_files() {
|
list_env_files() {
|
||||||
echo -e "${BLUE}=== Environment Files Found ===${NC}"
|
echo -e "${BLUE}=== Environment Files Found ===${NC}"
|
||||||
local count=0
|
local count=0
|
||||||
|
|
||||||
# Use a temp file to avoid subshell issues
|
# Use a temp file to avoid subshell issues
|
||||||
local temp_file=$(mktemp)
|
local temp_file
|
||||||
|
temp_file=$(mktemp)
|
||||||
find_env_files "$DOCKER_DIR" > "$temp_file"
|
find_env_files "$DOCKER_DIR" > "$temp_file"
|
||||||
|
|
||||||
while IFS= read -r env_file; do
|
while IFS= read -r env_file; do
|
||||||
if [ -n "$env_file" ]; then
|
if [ -n "$env_file" ]; then
|
||||||
local rel_path="${env_file#$DOCKER_DIR/}"
|
local rel_path="${env_file#"$DOCKER_DIR"/}"
|
||||||
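The `${env_file#...}` expansion above strips the docker root to get a repo-relative path; quoting the inner `"$DOCKER_DIR"` keeps any glob characters in the directory name from being treated as a pattern. A quick standalone illustration with assumed paths:

```bash
#!/bin/bash
DOCKER_DIR="$HOME/docker"
env_file="$DOCKER_DIR/vaultwarden/.env"

# Remove the "$DOCKER_DIR/" prefix; the inner quotes make the prefix literal.
rel_path="${env_file#"$DOCKER_DIR"/}"
echo "$rel_path"   # -> vaultwarden/.env
```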
local size=$(du -h "$env_file" 2>/dev/null | cut -f1)
|
local size
|
||||||
local modified=$(stat -c %y "$env_file" 2>/dev/null | cut -d' ' -f1)
|
local modified
|
||||||
|
size=$(du -h "$env_file" 2>/dev/null | cut -f1)
|
||||||
|
modified=$(stat -c %y "$env_file" 2>/dev/null | cut -d' ' -f1)
|
||||||
|
|
||||||
echo -e "${GREEN}📄 $rel_path${NC}"
|
echo -e "${GREEN}📄 $rel_path${NC}"
|
||||||
echo " Size: $size | Modified: $modified"
|
echo " Size: $size | Modified: $modified"
|
||||||
echo " Full path: $env_file"
|
echo " Full path: $env_file"
|
||||||
@@ -100,12 +103,12 @@ list_env_files() {
|
|||||||
count=$((count + 1))
|
count=$((count + 1))
|
||||||
fi
|
fi
|
||||||
done < "$temp_file"
|
done < "$temp_file"
|
||||||
|
|
||||||
# Clean up temp file
|
# Clean up temp file
|
||||||
rm -f "$temp_file"
|
rm -f "$temp_file"
|
||||||
|
|
||||||
echo -e "${BLUE}Total .env files found: $count${NC}"
|
echo -e "${BLUE}Total .env files found: $count${NC}"
|
||||||
|
|
||||||
if [ $count -eq 0 ]; then
|
if [ $count -eq 0 ]; then
|
||||||
echo -e "${YELLOW}No .env files found in $DOCKER_DIR${NC}"
|
echo -e "${YELLOW}No .env files found in $DOCKER_DIR${NC}"
|
||||||
echo "Make sure you have Docker containers with .env files in subdirectories."
|
echo "Make sure you have Docker containers with .env files in subdirectories."
|
||||||
@@ -115,26 +118,26 @@ list_env_files() {
|
|||||||
# Initialize backup repository
|
# Initialize backup repository
|
||||||
init_backup_repo() {
|
init_backup_repo() {
|
||||||
echo -e "${YELLOW}Initializing .env backup repository...${NC}"
|
echo -e "${YELLOW}Initializing .env backup repository...${NC}"
|
||||||
|
|
||||||
# Prompt for Gitea details if not provided
|
# Prompt for Gitea details if not provided
|
||||||
if [ -z "$GITEA_URL" ]; then
|
if [ -z "$GITEA_URL" ]; then
|
||||||
read -p "Enter your Gitea instance URL (e.g., https://git.yourdomain.com): " GITEA_URL
|
read -r -p "Enter your Gitea instance URL (e.g., https://git.yourdomain.com): " GITEA_URL
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -z "$GITEA_USERNAME" ]; then
|
if [ -z "$GITEA_USERNAME" ]; then
|
||||||
read -p "Enter your Gitea username: " GITEA_USERNAME
|
read -r -p "Enter your Gitea username: " GITEA_USERNAME
|
||||||
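Adding `-r` here (ShellCheck SC2162) stops `read` from interpreting backslashes, which matters when a pasted URL or Windows-style path contains them. A small demonstration with made-up input:

```bash
#!/bin/bash
printf 'C:\\temp\\env\n' | { read -r line; echo "with -r:    $line"; }  # keeps C:\temp\env
printf 'C:\\temp\\env\n' | { read    line; echo "without -r: $line"; }  # collapses to C:tempenv
```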
fi
|
fi
|
||||||
|
|
||||||
# Create backup directory
|
# Create backup directory
|
||||||
mkdir -p "$BACKUP_DIR"
|
mkdir -p "$BACKUP_DIR"
|
||||||
cd "$BACKUP_DIR"
|
cd "$BACKUP_DIR"
|
||||||
|
|
||||||
# Initialize git repository if not already done
|
# Initialize git repository if not already done
|
||||||
if [ ! -d ".git" ]; then
|
if [ ! -d ".git" ]; then
|
||||||
git init
|
git init
|
||||||
echo -e "${GREEN}Initialized local git repository${NC}"
|
echo -e "${GREEN}Initialized local git repository${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create .gitignore for additional security
|
# Create .gitignore for additional security
|
||||||
cat > .gitignore << 'EOF'
|
cat > .gitignore << 'EOF'
|
||||||
# Temporary files
|
# Temporary files
|
||||||
@@ -150,7 +153,7 @@ Thumbs.db
|
|||||||
# Logs
|
# Logs
|
||||||
*.log
|
*.log
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# Create README with important information
|
# Create README with important information
|
||||||
cat > README.md << 'EOF'
|
cat > README.md << 'EOF'
|
||||||
# Docker Environment Files Backup
|
# Docker Environment Files Backup
|
||||||
@@ -169,7 +172,7 @@ This repository contains sensitive configuration files including:
|
|||||||
|
|
||||||
## Structure
|
## Structure
|
||||||
|
|
||||||
```
|
$()`
|
||||||
docker-containers/
|
docker-containers/
|
||||||
├── container1/
|
├── container1/
|
||||||
│ ├── .env
|
│ ├── .env
|
||||||
@@ -177,7 +180,7 @@ docker-containers/
|
|||||||
├── container2/
|
├── container2/
|
||||||
│ └── .env
|
│ └── .env
|
||||||
└── ...
|
└── ...
|
||||||
```
|
$()`
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
@@ -189,46 +192,47 @@ docker-containers/
|
|||||||
|
|
||||||
This information is updated automatically by the backup script.
|
This information is updated automatically by the backup script.
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# Create directory structure
|
# Create directory structure
|
||||||
mkdir -p docker-containers
|
mkdir -p docker-containers
|
||||||
|
|
||||||
# Set up remote if URL provided
|
# Set up remote if URL provided
|
||||||
if [ -n "$GITEA_URL" ] && [ -n "$GITEA_USERNAME" ]; then
|
if [ -n "$GITEA_URL" ] && [ -n "$GITEA_USERNAME" ]; then
|
||||||
local remote_url="${GITEA_URL%/}/${GITEA_USERNAME}/${BACKUP_REPO_NAME}.git"
|
local remote_url="${GITEA_URL%/}/${GITEA_USERNAME}/${BACKUP_REPO_NAME}.git"
|
||||||
|
|
||||||
# Check if remote already exists
|
# Check if remote already exists
|
||||||
if ! git remote get-url origin >/dev/null 2>&1; then
|
if ! git remote get-url origin >/dev/null 2>&1; then
|
||||||
git remote add origin "$remote_url"
|
git remote add origin "$remote_url"
|
||||||
echo -e "${GREEN}Added remote origin: $remote_url${NC}"
|
echo -e "${GREEN}Added remote origin: $remote_url${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Save configuration
|
# Save configuration
|
||||||
cat > .env-backup-config << EOF
|
cat > .env-backup-config << EOF
|
||||||
GITEA_URL="$GITEA_URL"
|
GITEA_URL="$GITEA_URL"
|
||||||
GITEA_USERNAME="$GITEA_USERNAME"
|
GITEA_USERNAME="$GITEA_USERNAME"
|
||||||
BACKUP_REPO_NAME="$BACKUP_REPO_NAME"
|
BACKUP_REPO_NAME="$BACKUP_REPO_NAME"
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
echo -e "${YELLOW}Configuration saved to .env-backup-config${NC}"
|
echo -e "${YELLOW}Configuration saved to .env-backup-config${NC}"
|
||||||
echo -e "${BLUE}Next steps:${NC}"
|
echo -e "${BLUE}Next steps:${NC}"
|
||||||
echo "1. Create a private repository '$BACKUP_REPO_NAME' in your Gitea instance"
|
echo "1. Create a private repository '$BACKUP_REPO_NAME' in your Gitea instance"
|
||||||
echo "2. Run the backup script to perform your first backup"
|
echo "2. Run the backup script to perform your first backup"
|
||||||
echo "3. The script will attempt to push to the remote repository"
|
echo "3. The script will attempt to push to the remote repository"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Initial commit
|
# Initial commit
|
||||||
git add .
|
git add .
|
||||||
git commit -m "Initial setup of .env backup repository" || echo "Nothing to commit"
|
git commit -m "Initial setup of .env backup repository" || echo "Nothing to commit"
|
||||||
|
|
||||||
log "Backup repository initialized at $BACKUP_DIR"
|
log "Backup repository initialized at $BACKUP_DIR"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Load configuration
|
# Load configuration
|
||||||
load_config() {
|
load_config() {
|
||||||
local config_file="$BACKUP_DIR/.env-backup-config"
|
local config_file="$BACKUP_DIR/.env-backup-config"
|
||||||
|
|
||||||
if [ -f "$config_file" ]; then
|
if [ -f "$config_file" ]; then
|
||||||
|
# shellcheck source=/dev/null
|
||||||
source "$config_file"
|
source "$config_file"
|
||||||
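The new `# shellcheck source=/dev/null` directive tells ShellCheck not to try to resolve a file that is only known at runtime (SC1090/SC1091). The same pattern in isolation, assuming a config file of simple KEY="value" lines:

```bash
#!/bin/bash
load_config() {
    local config_file="$1"
    if [ -f "$config_file" ]; then
        # Path is dynamic, so ShellCheck cannot follow it; silence SC1090 for this line only.
        # shellcheck source=/dev/null
        source "$config_file"
    fi
}

load_config "$HOME/.env-backup/.env-backup-config"
echo "Gitea URL: ${GITEA_URL:-unset}"
```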
fi
|
fi
|
||||||
}
|
}
|
||||||
@@ -237,42 +241,45 @@ load_config() {
|
|||||||
backup_env_files() {
|
backup_env_files() {
|
||||||
local dry_run="$1"
|
local dry_run="$1"
|
||||||
local force="$2"
|
local force="$2"
|
||||||
|
|
||||||
echo -e "${YELLOW}Starting .env files backup...${NC}"
|
echo -e "${YELLOW}Starting .env files backup...${NC}"
|
||||||
|
|
||||||
# Check if backup directory exists
|
# Check if backup directory exists
|
||||||
if [ ! -d "$BACKUP_DIR" ]; then
|
if [ ! -d "$BACKUP_DIR" ]; then
|
||||||
echo -e "${RED}Backup directory not found. Run with --init first.${NC}"
|
echo -e "${RED}Backup directory not found. Run with --init first.${NC}"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cd "$BACKUP_DIR"
|
cd "$BACKUP_DIR"
|
||||||
load_config
|
load_config
|
||||||
|
|
||||||
# Create timestamp
|
# Create timestamp
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
local timestamp
|
||||||
|
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
local backup_count=0
|
local backup_count=0
|
||||||
local unchanged_count=0
|
local unchanged_count=0
|
||||||
|
|
||||||
# Process each .env file using a temp file to avoid subshell issues
|
# Process each .env file using a temp file to avoid subshell issues
|
||||||
local temp_file=$(mktemp)
|
local temp_file
|
||||||
|
temp_file=$(mktemp)
|
||||||
find_env_files "$DOCKER_DIR" > "$temp_file"
|
find_env_files "$DOCKER_DIR" > "$temp_file"
|
||||||
|
|
||||||
while IFS= read -r env_file; do
|
while IFS= read -r env_file; do
|
||||||
if [ -n "$env_file" ]; then
|
if [ -n "$env_file" ]; then
|
||||||
# Determine relative path and backup location
|
# Determine relative path and backup location
|
||||||
local rel_path="${env_file#$DOCKER_DIR/}"
|
local rel_path="${env_file#"$DOCKER_DIR"/}"
|
||||||
local backup_path="docker-containers/$rel_path"
|
local backup_path="docker-containers/$rel_path"
|
||||||
local backup_dir=$(dirname "$backup_path")
|
local backup_dir
|
||||||
|
backup_dir=$(dirname "$backup_path")
|
||||||
|
|
||||||
if [ "$dry_run" = "true" ]; then
|
if [ "$dry_run" = "true" ]; then
|
||||||
echo -e "${BLUE}Would backup: $rel_path${NC}"
|
echo -e "${BLUE}Would backup: $rel_path${NC}"
|
||||||
continue
|
continue
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Create backup directory structure
|
# Create backup directory structure
|
||||||
mkdir -p "$backup_dir"
|
mkdir -p "$backup_dir"
|
||||||
|
|
||||||
# Check if file has changed
|
# Check if file has changed
|
||||||
local needs_backup=true
|
local needs_backup=true
|
||||||
if [ -f "$backup_path" ] && [ "$force" != "true" ]; then
|
if [ -f "$backup_path" ] && [ "$force" != "true" ]; then
|
||||||
@@ -281,17 +288,18 @@ backup_env_files() {
|
|||||||
unchanged_count=$((unchanged_count + 1))
|
unchanged_count=$((unchanged_count + 1))
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "$needs_backup" = "true" ]; then
|
if [ "$needs_backup" = "true" ]; then
|
||||||
# Copy the file
|
# Copy the file
|
||||||
cp "$env_file" "$backup_path"
|
cp "$env_file" "$backup_path"
|
||||||
echo -e "${GREEN}✓ Backed up: $rel_path${NC}"
|
echo -e "${GREEN}✓ Backed up: $rel_path${NC}"
|
||||||
backup_count=$((backup_count + 1))
|
backup_count=$((backup_count + 1))
|
||||||
|
|
||||||
# Also create a reference docker-compose.yml if it exists
|
# Also create a reference docker-compose.yml if it exists
|
||||||
local compose_file=$(dirname "$env_file")/docker-compose.yml
|
local compose_file
|
||||||
|
compose_file=$(dirname "$env_file")/docker-compose.yml
|
||||||
local compose_backup="$backup_dir/docker-compose.yml.ref"
|
local compose_backup="$backup_dir/docker-compose.yml.ref"
|
||||||
|
|
||||||
if [ -f "$compose_file" ] && [ ! -f "$compose_backup" ]; then
|
if [ -f "$compose_file" ] && [ ! -f "$compose_backup" ]; then
|
||||||
cp "$compose_file" "$compose_backup"
|
cp "$compose_file" "$compose_backup"
|
||||||
echo -e "${BLUE} + Reference: docker-compose.yml${NC}"
|
echo -e "${BLUE} + Reference: docker-compose.yml${NC}"
|
||||||
@@ -301,15 +309,15 @@ backup_env_files() {
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
done < "$temp_file"
|
done < "$temp_file"
|
||||||
|
|
||||||
# Clean up temp file
|
# Clean up temp file
|
||||||
rm -f "$temp_file"
|
rm -f "$temp_file"
|
||||||
|
|
||||||
if [ "$dry_run" = "true" ]; then
|
if [ "$dry_run" = "true" ]; then
|
||||||
echo -e "${BLUE}Dry run completed. No files were actually backed up.${NC}"
|
echo -e "${BLUE}Dry run completed. No files were actually backed up.${NC}"
|
||||||
return 0
|
return 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Update README with backup information
|
# Update README with backup information
|
||||||
sed -i "/^## Last Backup/,$ d" README.md
|
sed -i "/^## Last Backup/,$ d" README.md
|
||||||
cat >> README.md << EOF
|
cat >> README.md << EOF
|
||||||
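The two lines above implement a refresh: `sed` deletes everything from the `## Last Backup` marker to end of file, then the heredoc appends a fresh status section. A self-contained sketch of the same idiom, with assumed counts:

```bash
#!/bin/bash
readme="README.md"
backup_count=3
unchanged_count=5

[ -f "$readme" ] || echo "# Docker Environment Files Backup" > "$readme"

# Drop any previous status section (marker line through EOF), then append a new one.
sed -i '/^## Last Backup/,$ d' "$readme"
cat >> "$readme" << EOF

## Last Backup

- **Date**: $(date '+%Y-%m-%d %H:%M:%S')
- **Files backed up**: $backup_count
- **Files unchanged**: $unchanged_count
EOF
```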
@@ -323,10 +331,10 @@ backup_env_files() {
|
|||||||
|
|
||||||
Generated by backup-env-files.sh
|
Generated by backup-env-files.sh
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# Commit changes
|
# Commit changes
|
||||||
git add .
|
git add .
|
||||||
|
|
||||||
if git diff --staged --quiet; then
|
if git diff --staged --quiet; then
|
||||||
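`git diff --staged --quiet` exits 0 when the index matches HEAD, which is what lets the script skip empty commits. The idiom on its own:

```bash
#!/bin/bash
git add .
if git diff --staged --quiet; then
    echo "No changes to commit"
else
    git commit -m "Backup .env files - $(date '+%Y-%m-%d %H:%M:%S')"
fi
```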
echo -e "${YELLOW}No changes to commit${NC}"
|
echo -e "${YELLOW}No changes to commit${NC}"
|
||||||
log "Backup completed - no changes detected"
|
log "Backup completed - no changes detected"
|
||||||
@@ -336,9 +344,9 @@ EOF
|
|||||||
- Files backed up: $backup_count
|
- Files backed up: $backup_count
|
||||||
- Files unchanged: $unchanged_count
|
- Files unchanged: $unchanged_count
|
||||||
- Total files: $((backup_count + unchanged_count))"
|
- Total files: $((backup_count + unchanged_count))"
|
||||||
|
|
||||||
echo -e "${GREEN}Changes committed to local repository${NC}"
|
echo -e "${GREEN}Changes committed to local repository${NC}"
|
||||||
|
|
||||||
# Push to remote if configured
|
# Push to remote if configured
|
||||||
if git remote get-url origin >/dev/null 2>&1; then
|
if git remote get-url origin >/dev/null 2>&1; then
|
||||||
echo -e "${YELLOW}Pushing to remote repository...${NC}"
|
echo -e "${YELLOW}Pushing to remote repository...${NC}"
|
||||||
@@ -357,7 +365,7 @@ EOF
|
|||||||
log "Backup completed locally - $backup_count files backed up, $unchanged_count unchanged"
|
log "Backup completed locally - $backup_count files backed up, $unchanged_count unchanged"
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo -e "${GREEN}Backup completed!${NC}"
|
echo -e "${GREEN}Backup completed!${NC}"
|
||||||
echo -e "${BLUE}Summary:${NC}"
|
echo -e "${BLUE}Summary:${NC}"
|
||||||
echo " - Files backed up: $backup_count"
|
echo " - Files backed up: $backup_count"
|
||||||
@@ -368,42 +376,44 @@ EOF
|
|||||||
# Restore .env files
|
# Restore .env files
|
||||||
restore_env_files() {
|
restore_env_files() {
|
||||||
echo -e "${YELLOW}Starting .env files restore...${NC}"
|
echo -e "${YELLOW}Starting .env files restore...${NC}"
|
||||||
|
|
||||||
if [ ! -d "$BACKUP_DIR" ]; then
|
if [ ! -d "$BACKUP_DIR" ]; then
|
||||||
echo -e "${RED}Backup directory not found at $BACKUP_DIR${NC}"
|
echo -e "${RED}Backup directory not found at $BACKUP_DIR${NC}"
|
||||||
echo "Either run --init first or clone your backup repository to this location."
|
echo "Either run --init first or clone your backup repository to this location."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cd "$BACKUP_DIR"
|
cd "$BACKUP_DIR"
|
||||||
load_config
|
load_config
|
||||||
|
|
||||||
# Pull latest changes if remote is configured
|
# Pull latest changes if remote is configured
|
||||||
if git remote get-url origin >/dev/null 2>&1; then
|
if git remote get-url origin >/dev/null 2>&1; then
|
||||||
echo -e "${YELLOW}Pulling latest changes from remote...${NC}"
|
echo -e "${YELLOW}Pulling latest changes from remote...${NC}"
|
||||||
git pull origin main 2>/dev/null || git pull origin master 2>/dev/null || true
|
git pull origin main 2>/dev/null || git pull origin master 2>/dev/null || true
|
||||||
fi
|
fi
|
||||||
|
|
||||||
local restore_count=0
|
local restore_count=0
|
||||||
local error_count=0
|
local error_count=0
|
||||||
|
|
||||||
# Use a temp file to avoid subshell issues
|
# Use a temp file to avoid subshell issues
|
||||||
local temp_file=$(mktemp)
|
local temp_file
|
||||||
|
temp_file=$(mktemp)
|
||||||
find docker-containers -name "*.env" -type f 2>/dev/null > "$temp_file"
|
find docker-containers -name "*.env" -type f 2>/dev/null > "$temp_file"
|
||||||
|
|
||||||
while IFS= read -r backup_file; do
|
while IFS= read -r backup_file; do
|
||||||
if [ -n "$backup_file" ]; then
|
if [ -n "$backup_file" ]; then
|
||||||
# Determine target path
|
# Determine target path
|
||||||
local rel_path="${backup_file#docker-containers/}"
|
local rel_path="${backup_file#docker-containers/}"
|
||||||
local target_file="$DOCKER_DIR/$rel_path"
|
local target_file="$DOCKER_DIR/$rel_path"
|
||||||
local target_dir=$(dirname "$target_file")
|
local target_dir
|
||||||
|
target_dir=$(dirname "$target_file")
|
||||||
|
|
||||||
# Create target directory if it doesn't exist
|
# Create target directory if it doesn't exist
|
||||||
if [ ! -d "$target_dir" ]; then
|
if [ ! -d "$target_dir" ]; then
|
||||||
echo -e "${YELLOW}Creating directory: $target_dir${NC}"
|
echo -e "${YELLOW}Creating directory: $target_dir${NC}"
|
||||||
mkdir -p "$target_dir"
|
mkdir -p "$target_dir"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Ask for confirmation if file exists and is different
|
# Ask for confirmation if file exists and is different
|
||||||
if [ -f "$target_file" ]; then
|
if [ -f "$target_file" ]; then
|
||||||
if ! cmp -s "$backup_file" "$target_file"; then
|
if ! cmp -s "$backup_file" "$target_file"; then
|
||||||
@@ -419,7 +429,7 @@ restore_env_files() {
|
|||||||
continue
|
continue
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Copy the file
|
# Copy the file
|
||||||
if cp "$backup_file" "$target_file"; then
|
if cp "$backup_file" "$target_file"; then
|
||||||
echo -e "${GREEN}✓ Restored: $rel_path${NC}"
|
echo -e "${GREEN}✓ Restored: $rel_path${NC}"
|
||||||
@@ -430,15 +440,15 @@ restore_env_files() {
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
done < "$temp_file"
|
done < "$temp_file"
|
||||||
|
|
||||||
# Clean up temp file
|
# Clean up temp file
|
||||||
rm -f "$temp_file"
|
rm -f "$temp_file"
|
||||||
|
|
||||||
echo -e "${GREEN}Restore completed!${NC}"
|
echo -e "${GREEN}Restore completed!${NC}"
|
||||||
echo -e "${BLUE}Summary:${NC}"
|
echo -e "${BLUE}Summary:${NC}"
|
||||||
echo " - Files restored: $restore_count"
|
echo " - Files restored: $restore_count"
|
||||||
echo " - Errors: $error_count"
|
echo " - Errors: $error_count"
|
||||||
|
|
||||||
log "Restore completed - $restore_count files restored, $error_count errors"
|
log "Restore completed - $restore_count files restored, $error_count errors"
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -449,7 +459,7 @@ main() {
|
|||||||
local force=false
|
local force=false
|
||||||
local restore=false
|
local restore=false
|
||||||
local list_files=false
|
local list_files=false
|
||||||
|
|
||||||
# Parse command line arguments
|
# Parse command line arguments
|
||||||
while [[ $# -gt 0 ]]; do
|
while [[ $# -gt 0 ]]; do
|
||||||
case $1 in
|
case $1 in
|
||||||
@@ -492,10 +502,10 @@ main() {
|
|||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
done
|
done
|
||||||
|
|
||||||
# Check dependencies
|
# Check dependencies
|
||||||
check_dependencies
|
check_dependencies
|
||||||
|
|
||||||
# Execute requested action
|
# Execute requested action
|
||||||
if [ "$list_files" = true ]; then
|
if [ "$list_files" = true ]; then
|
||||||
list_env_files
|
list_env_files
|
||||||
backup-env-files.sh.sc2162_backup (new executable file, 512 lines)
@@ -0,0 +1,512 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# backup-env-files.sh - Backup .env files to private Gitea repository
|
||||||
|
# Author: Shell Repository
|
||||||
|
# Description: Securely backup and version control .env files from ~/docker/* directories
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[0;33m'
|
||||||
|
RED='\033[0;31m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
DOCKER_DIR="$HOME/docker"
|
||||||
|
BACKUP_REPO_NAME="docker-env-backup"
|
||||||
|
BACKUP_DIR="$HOME/.env-backup"
|
||||||
|
LOG_FILE="$SCRIPT_DIR/logs/env-backup.log"
|
||||||
|
|
||||||
|
# Ensure logs directory exists
|
||||||
|
mkdir -p "$(dirname "$LOG_FILE")"
|
||||||
|
|
||||||
|
# Logging function
|
||||||
|
log() {
|
||||||
|
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Display usage information
|
||||||
|
usage() {
|
||||||
|
echo "Usage: $0 [OPTIONS]"
|
||||||
|
echo ""
|
||||||
|
echo "Backup .env files from ~/docker/* to private Gitea repository"
|
||||||
|
echo ""
|
||||||
|
echo "Options:"
|
||||||
|
echo " -h, --help Show this help message"
|
||||||
|
echo " -i, --init Initialize the backup repository"
|
||||||
|
echo " -f, --force Force overwrite existing files"
|
||||||
|
echo " -d, --dry-run Show what would be backed up without doing it"
|
||||||
|
echo " -r, --restore Restore .env files from backup"
|
||||||
|
echo " -l, --list List all .env files found"
|
||||||
|
echo " -g, --gitea-url URL Set Gitea instance URL"
|
||||||
|
echo " -u, --username USER Set Gitea username"
|
||||||
|
echo ""
|
||||||
|
echo "Examples:"
|
||||||
|
echo " $0 --init # First time setup"
|
||||||
|
echo " $0 # Regular backup"
|
||||||
|
echo " $0 --dry-run # See what would be backed up"
|
||||||
|
echo " $0 --restore # Restore files from backup"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check dependencies
|
||||||
|
check_dependencies() {
|
||||||
|
local missing_deps=()
|
||||||
|
|
||||||
|
command -v git >/dev/null 2>&1 || missing_deps+=("git")
|
||||||
|
command -v find >/dev/null 2>&1 || missing_deps+=("find")
|
||||||
|
|
||||||
|
if [ ${#missing_deps[@]} -ne 0 ]; then
|
||||||
|
echo -e "${RED}Error: Missing required dependencies: ${missing_deps[*]}${NC}"
|
||||||
|
echo "Please install the missing dependencies and try again."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Find all .env files in docker directories
|
||||||
|
find_env_files() {
|
||||||
|
local base_dir="$1"
|
||||||
|
|
||||||
|
if [ ! -d "$base_dir" ]; then
|
||||||
|
echo -e "${YELLOW}Warning: Docker directory $base_dir does not exist${NC}"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Find all .env files, including hidden ones and those with different extensions
|
||||||
|
find "$base_dir" -type f \( -name "*.env" -o -name ".env*" -o -name "env.*" \) 2>/dev/null | sort
|
||||||
|
}
|
||||||
|
|
||||||
|
# List all .env files
|
||||||
|
list_env_files() {
|
||||||
|
echo -e "${BLUE}=== Environment Files Found ===${NC}"
|
||||||
|
local count=0
|
||||||
|
|
||||||
|
# Use a temp file to avoid subshell issues
|
||||||
|
local temp_file=$(mktemp)
|
||||||
|
find_env_files "$DOCKER_DIR" > "$temp_file"
|
||||||
|
|
||||||
|
while IFS= read -r env_file; do
|
||||||
|
if [ -n "$env_file" ]; then
|
||||||
|
local rel_path="${env_file#$DOCKER_DIR/}"
|
||||||
|
local size=$(du -h "$env_file" 2>/dev/null | cut -f1)
|
||||||
|
local modified=$(stat -c %y "$env_file" 2>/dev/null | cut -d' ' -f1)
|
||||||
|
|
||||||
|
echo -e "${GREEN}📄 $rel_path${NC}"
|
||||||
|
echo " Size: $size | Modified: $modified"
|
||||||
|
echo " Full path: $env_file"
|
||||||
|
echo ""
|
||||||
|
count=$((count + 1))
|
||||||
|
fi
|
||||||
|
done < "$temp_file"
|
||||||
|
|
||||||
|
# Clean up temp file
|
||||||
|
rm -f "$temp_file"
|
||||||
|
|
||||||
|
echo -e "${BLUE}Total .env files found: $count${NC}"
|
||||||
|
|
||||||
|
if [ $count -eq 0 ]; then
|
||||||
|
echo -e "${YELLOW}No .env files found in $DOCKER_DIR${NC}"
|
||||||
|
echo "Make sure you have Docker containers with .env files in subdirectories."
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Initialize backup repository
|
||||||
|
init_backup_repo() {
|
||||||
|
echo -e "${YELLOW}Initializing .env backup repository...${NC}"
|
||||||
|
|
||||||
|
# Prompt for Gitea details if not provided
|
||||||
|
if [ -z "$GITEA_URL" ]; then
|
||||||
|
read -p "Enter your Gitea instance URL (e.g., https://git.yourdomain.com): " GITEA_URL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$GITEA_USERNAME" ]; then
|
||||||
|
read -p "Enter your Gitea username: " GITEA_USERNAME
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create backup directory
|
||||||
|
mkdir -p "$BACKUP_DIR"
|
||||||
|
cd "$BACKUP_DIR"
|
||||||
|
|
||||||
|
# Initialize git repository if not already done
|
||||||
|
if [ ! -d ".git" ]; then
|
||||||
|
git init
|
||||||
|
echo -e "${GREEN}Initialized local git repository${NC}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create .gitignore for additional security
|
||||||
|
cat > .gitignore << 'EOF'
|
||||||
|
# Temporary files
|
||||||
|
*.tmp
|
||||||
|
*.swp
|
||||||
|
*.bak
|
||||||
|
*~
|
||||||
|
|
||||||
|
# OS generated files
|
||||||
|
.DS_Store
|
||||||
|
Thumbs.db
|
||||||
|
|
||||||
|
# Logs
|
||||||
|
*.log
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Create README with important information
|
||||||
|
cat > README.md << 'EOF'
|
||||||
|
# Docker Environment Files Backup
|
||||||
|
|
||||||
|
This repository contains backup copies of .env files from Docker containers.
|
||||||
|
|
||||||
|
## ⚠️ SECURITY WARNING ⚠️
|
||||||
|
|
||||||
|
This repository contains sensitive configuration files including:
|
||||||
|
- API keys
|
||||||
|
- Database passwords
|
||||||
|
- Secret tokens
|
||||||
|
- Private configurations
|
||||||
|
|
||||||
|
**NEVER make this repository public!**
|
||||||
|
|
||||||
|
## Structure
|
||||||
|
|
||||||
|
$()`
|
||||||
|
docker-containers/
|
||||||
|
├── container1/
|
||||||
|
│ ├── .env
|
||||||
|
│ └── docker-compose.yml (reference only)
|
||||||
|
├── container2/
|
||||||
|
│ └── .env
|
||||||
|
└── ...
|
||||||
|
$()`
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
- Files are organized by container/service name
|
||||||
|
- Only .env files are backed up (no other sensitive files)
|
||||||
|
- Restore using the backup-env-files.sh script
|
||||||
|
|
||||||
|
## Last Backup
|
||||||
|
|
||||||
|
This information is updated automatically by the backup script.
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Create directory structure
|
||||||
|
mkdir -p docker-containers
|
||||||
|
|
||||||
|
# Set up remote if URL provided
|
||||||
|
if [ -n "$GITEA_URL" ] && [ -n "$GITEA_USERNAME" ]; then
|
||||||
|
local remote_url="${GITEA_URL%/}/${GITEA_USERNAME}/${BACKUP_REPO_NAME}.git"
|
||||||
|
|
||||||
|
# Check if remote already exists
|
||||||
|
if ! git remote get-url origin >/dev/null 2>&1; then
|
||||||
|
git remote add origin "$remote_url"
|
||||||
|
echo -e "${GREEN}Added remote origin: $remote_url${NC}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Save configuration
|
||||||
|
cat > .env-backup-config << EOF
|
||||||
|
GITEA_URL="$GITEA_URL"
|
||||||
|
GITEA_USERNAME="$GITEA_USERNAME"
|
||||||
|
BACKUP_REPO_NAME="$BACKUP_REPO_NAME"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
echo -e "${YELLOW}Configuration saved to .env-backup-config${NC}"
|
||||||
|
echo -e "${BLUE}Next steps:${NC}"
|
||||||
|
echo "1. Create a private repository '$BACKUP_REPO_NAME' in your Gitea instance"
|
||||||
|
echo "2. Run the backup script to perform your first backup"
|
||||||
|
echo "3. The script will attempt to push to the remote repository"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Initial commit
|
||||||
|
git add .
|
||||||
|
git commit -m "Initial setup of .env backup repository" || echo "Nothing to commit"
|
||||||
|
|
||||||
|
log "Backup repository initialized at $BACKUP_DIR"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Load configuration
|
||||||
|
load_config() {
|
||||||
|
local config_file="$BACKUP_DIR/.env-backup-config"
|
||||||
|
|
||||||
|
if [ -f "$config_file" ]; then
|
||||||
|
source "$config_file"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Backup .env files
|
||||||
|
backup_env_files() {
|
||||||
|
local dry_run="$1"
|
||||||
|
local force="$2"
|
||||||
|
|
||||||
|
echo -e "${YELLOW}Starting .env files backup...${NC}"
|
||||||
|
|
||||||
|
# Check if backup directory exists
|
||||||
|
if [ ! -d "$BACKUP_DIR" ]; then
|
||||||
|
echo -e "${RED}Backup directory not found. Run with --init first.${NC}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
cd "$BACKUP_DIR"
|
||||||
|
load_config
|
||||||
|
|
||||||
|
# Create timestamp
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
local backup_count=0
|
||||||
|
local unchanged_count=0
|
||||||
|
|
||||||
|
# Process each .env file using a temp file to avoid subshell issues
|
||||||
|
local temp_file=$(mktemp)
|
||||||
|
find_env_files "$DOCKER_DIR" > "$temp_file"
|
||||||
|
|
||||||
|
while IFS= read -r env_file; do
|
||||||
|
if [ -n "$env_file" ]; then
|
||||||
|
# Determine relative path and backup location
|
||||||
|
local rel_path="${env_file#$DOCKER_DIR/}"
|
||||||
|
local backup_path="docker-containers/$rel_path"
|
||||||
|
local backup_dir=$(dirname "$backup_path")
|
||||||
|
|
||||||
|
if [ "$dry_run" = "true" ]; then
|
||||||
|
echo -e "${BLUE}Would backup: $rel_path${NC}"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create backup directory structure
|
||||||
|
mkdir -p "$backup_dir"
|
||||||
|
|
||||||
|
# Check if file has changed
|
||||||
|
local needs_backup=true
|
||||||
|
if [ -f "$backup_path" ] && [ "$force" != "true" ]; then
|
||||||
|
if cmp -s "$env_file" "$backup_path"; then
|
||||||
|
needs_backup=false
|
||||||
|
unchanged_count=$((unchanged_count + 1))
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$needs_backup" = "true" ]; then
|
||||||
|
# Copy the file
|
||||||
|
cp "$env_file" "$backup_path"
|
||||||
|
echo -e "${GREEN}✓ Backed up: $rel_path${NC}"
|
||||||
|
backup_count=$((backup_count + 1))
|
||||||
|
|
||||||
|
# Also create a reference docker-compose.yml if it exists
|
||||||
|
local compose_file=$(dirname "$env_file")/docker-compose.yml
|
||||||
|
local compose_backup="$backup_dir/docker-compose.yml.ref"
|
||||||
|
|
||||||
|
if [ -f "$compose_file" ] && [ ! -f "$compose_backup" ]; then
|
||||||
|
cp "$compose_file" "$compose_backup"
|
||||||
|
echo -e "${BLUE} + Reference: docker-compose.yml${NC}"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}- Unchanged: $rel_path${NC}"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done < "$temp_file"
|
||||||
|
|
||||||
|
# Clean up temp file
|
||||||
|
rm -f "$temp_file"
|
||||||
|
|
||||||
|
if [ "$dry_run" = "true" ]; then
|
||||||
|
echo -e "${BLUE}Dry run completed. No files were actually backed up.${NC}"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Update README with backup information
|
||||||
|
sed -i "/^## Last Backup/,$ d" README.md
|
||||||
|
cat >> README.md << EOF
|
||||||
|
|
||||||
|
## Last Backup
|
||||||
|
|
||||||
|
- **Date**: $timestamp
|
||||||
|
- **Files backed up**: $backup_count
|
||||||
|
- **Files unchanged**: $unchanged_count
|
||||||
|
- **Total files**: $((backup_count + unchanged_count))
|
||||||
|
|
||||||
|
Generated by backup-env-files.sh
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Commit changes
|
||||||
|
git add .
|
||||||
|
|
||||||
|
if git diff --staged --quiet; then
|
||||||
|
echo -e "${YELLOW}No changes to commit${NC}"
|
||||||
|
log "Backup completed - no changes detected"
|
||||||
|
else
|
||||||
|
git commit -m "Backup .env files - $timestamp
|
||||||
|
|
||||||
|
- Files backed up: $backup_count
|
||||||
|
- Files unchanged: $unchanged_count
|
||||||
|
- Total files: $((backup_count + unchanged_count))"
|
||||||
|
|
||||||
|
echo -e "${GREEN}Changes committed to local repository${NC}"
|
||||||
|
|
||||||
|
# Push to remote if configured
|
||||||
|
if git remote get-url origin >/dev/null 2>&1; then
|
||||||
|
echo -e "${YELLOW}Pushing to remote repository...${NC}"
|
||||||
|
if git push origin main 2>/dev/null || git push origin master 2>/dev/null; then
|
||||||
|
echo -e "${GREEN}✓ Successfully pushed to remote repository${NC}"
|
||||||
|
log "Backup completed and pushed to remote - $backup_count files backed up, $unchanged_count unchanged"
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}Warning: Could not push to remote repository${NC}"
|
||||||
|
echo "You may need to:"
|
||||||
|
echo "1. Create the repository in Gitea first"
|
||||||
|
echo "2. Set up authentication (SSH key or token)"
|
||||||
|
log "Backup completed locally but failed to push to remote - $backup_count files backed up"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "${YELLOW}No remote repository configured${NC}"
|
||||||
|
log "Backup completed locally - $backup_count files backed up, $unchanged_count unchanged"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo -e "${GREEN}Backup completed!${NC}"
|
||||||
|
echo -e "${BLUE}Summary:${NC}"
|
||||||
|
echo " - Files backed up: $backup_count"
|
||||||
|
echo " - Files unchanged: $unchanged_count"
|
||||||
|
echo " - Backup location: $BACKUP_DIR"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Restore .env files
|
||||||
|
restore_env_files() {
|
||||||
|
echo -e "${YELLOW}Starting .env files restore...${NC}"
|
||||||
|
|
||||||
|
if [ ! -d "$BACKUP_DIR" ]; then
|
||||||
|
echo -e "${RED}Backup directory not found at $BACKUP_DIR${NC}"
|
||||||
|
echo "Either run --init first or clone your backup repository to this location."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
cd "$BACKUP_DIR"
|
||||||
|
load_config
|
||||||
|
|
||||||
|
# Pull latest changes if remote is configured
|
||||||
|
if git remote get-url origin >/dev/null 2>&1; then
|
||||||
|
echo -e "${YELLOW}Pulling latest changes from remote...${NC}"
|
||||||
|
git pull origin main 2>/dev/null || git pull origin master 2>/dev/null || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
local restore_count=0
|
||||||
|
local error_count=0
|
||||||
|
|
||||||
|
# Use a temp file to avoid subshell issues
|
||||||
|
local temp_file=$(mktemp)
|
||||||
|
find docker-containers -name "*.env" -type f 2>/dev/null > "$temp_file"
|
||||||
|
|
||||||
|
while IFS= read -r backup_file; do
|
||||||
|
if [ -n "$backup_file" ]; then
|
||||||
|
# Determine target path
|
||||||
|
local rel_path="${backup_file#docker-containers/}"
|
||||||
|
local target_file="$DOCKER_DIR/$rel_path"
|
||||||
|
local target_dir=$(dirname "$target_file")
|
||||||
|
|
||||||
|
# Create target directory if it doesn't exist
|
||||||
|
if [ ! -d "$target_dir" ]; then
|
||||||
|
echo -e "${YELLOW}Creating directory: $target_dir${NC}"
|
||||||
|
mkdir -p "$target_dir"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Ask for confirmation if file exists and is different
|
||||||
|
if [ -f "$target_file" ]; then
|
||||||
|
if ! cmp -s "$backup_file" "$target_file"; then
|
||||||
|
echo -e "${YELLOW}File exists and differs: $rel_path${NC}"
|
||||||
|
read -p "Overwrite? (y/N): " -n 1 -r
|
||||||
|
echo
|
||||||
|
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||||
|
echo -e "${YELLOW}Skipped: $rel_path${NC}"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "${GREEN}Identical: $rel_path${NC}"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Copy the file
|
||||||
|
if cp "$backup_file" "$target_file"; then
|
||||||
|
echo -e "${GREEN}✓ Restored: $rel_path${NC}"
|
||||||
|
restore_count=$((restore_count + 1))
|
||||||
|
else
|
||||||
|
echo -e "${RED}✗ Failed to restore: $rel_path${NC}"
|
||||||
|
error_count=$((error_count + 1))
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done < "$temp_file"
|
||||||
|
|
||||||
|
# Clean up temp file
|
||||||
|
rm -f "$temp_file"
|
||||||
|
|
||||||
|
echo -e "${GREEN}Restore completed!${NC}"
|
||||||
|
echo -e "${BLUE}Summary:${NC}"
|
||||||
|
echo " - Files restored: $restore_count"
|
||||||
|
echo " - Errors: $error_count"
|
||||||
|
|
||||||
|
log "Restore completed - $restore_count files restored, $error_count errors"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main function
|
||||||
|
main() {
|
||||||
|
local init_repo=false
|
||||||
|
local dry_run=false
|
||||||
|
local force=false
|
||||||
|
local restore=false
|
||||||
|
local list_files=false
|
||||||
|
|
||||||
|
# Parse command line arguments
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case $1 in
|
||||||
|
-h|--help)
|
||||||
|
usage
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
-i|--init)
|
||||||
|
init_repo=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-f|--force)
|
||||||
|
force=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-d|--dry-run)
|
||||||
|
dry_run=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-r|--restore)
|
||||||
|
restore=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-l|--list)
|
||||||
|
list_files=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-g|--gitea-url)
|
||||||
|
GITEA_URL="$2"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
-u|--username)
|
||||||
|
GITEA_USERNAME="$2"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unknown option: $1"
|
||||||
|
usage
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Check dependencies
|
||||||
|
check_dependencies
|
||||||
|
|
||||||
|
# Execute requested action
|
||||||
|
if [ "$list_files" = true ]; then
|
||||||
|
list_env_files
|
||||||
|
elif [ "$init_repo" = true ]; then
|
||||||
|
init_backup_repo
|
||||||
|
elif [ "$restore" = true ]; then
|
||||||
|
restore_env_files
|
||||||
|
else
|
||||||
|
backup_env_files "$dry_run" "$force"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Run main function with all arguments
|
||||||
|
main "$@"
|
||||||
@@ -50,12 +50,12 @@ monitor_realtime() {
|
|||||||
for tag in "${BACKUP_TAGS[@]}"; do
|
for tag in "${BACKUP_TAGS[@]}"; do
|
||||||
tags_filter="$tags_filter -t $tag"
|
tags_filter="$tags_filter -t $tag"
|
||||||
done
|
done
|
||||||
|
|
||||||
log_info "Starting real-time monitoring of backup logs"
|
log_info "Starting real-time monitoring of backup logs"
|
||||||
log_info "Press Ctrl+C to stop monitoring"
|
log_info "Press Ctrl+C to stop monitoring"
|
||||||
echo
|
echo
|
||||||
|
|
||||||
sudo journalctl -f $tags_filter --no-hostname --output=short-iso | while read -r line; do
|
sudo journalctl -f "$tags_filter" --no-hostname --output=short-iso | while read -r line; do
|
||||||
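`$tags_filter` holds several `-t TAG` pairs in one string, so how it expands depends on quoting: unquoted it word-splits into separate flags, quoted it is passed as a single argument. Collecting the flags in an array sidesteps the question; a sketch with assumed tag names:

```bash
#!/bin/bash
BACKUP_TAGS=("plex-backup" "immich-backup" "docker-backup")  # assumed tag names

# One array element per word, so quoting stays correct when expanded.
tag_args=()
for tag in "${BACKUP_TAGS[@]}"; do
    tag_args+=(-t "$tag")
done

journalctl -f "${tag_args[@]}" --no-hostname --output=short-iso
```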
# Color code different log levels and services
|
# Color code different log levels and services
|
||||||
if [[ "$line" =~ ERROR ]]; then
|
if [[ "$line" =~ ERROR ]]; then
|
||||||
echo -e "${RED}$line${NC}"
|
echo -e "${RED}$line${NC}"
|
||||||
@@ -78,9 +78,9 @@ monitor_realtime() {
|
|||||||
show_recent_logs() {
|
show_recent_logs() {
|
||||||
local hours="${1:-24}"
|
local hours="${1:-24}"
|
||||||
local service="${2:-all}"
|
local service="${2:-all}"
|
||||||
|
|
||||||
log_info "Showing logs from the last $hours hours"
|
log_info "Showing logs from the last $hours hours"
|
||||||
|
|
||||||
local tags_filter=""
|
local tags_filter=""
|
||||||
if [ "$service" = "all" ]; then
|
if [ "$service" = "all" ]; then
|
||||||
for tag in "${BACKUP_TAGS[@]}"; do
|
for tag in "${BACKUP_TAGS[@]}"; do
|
||||||
@@ -89,9 +89,9 @@ show_recent_logs() {
|
|||||||
else
|
else
|
||||||
tags_filter="-t $service"
|
tags_filter="-t $service"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo
|
echo
|
||||||
sudo journalctl --since "${hours} hours ago" $tags_filter --no-hostname --output=short-iso | \
|
sudo journalctl --since "${hours} hours ago" "$tags_filter" --no-hostname --output=short-iso | \
|
||||||
while read -r line; do
|
while read -r line; do
|
||||||
# Color code the output
|
# Color code the output
|
||||||
if [[ "$line" =~ ERROR ]]; then
|
if [[ "$line" =~ ERROR ]]; then
|
||||||
@@ -108,14 +108,13 @@ show_recent_logs() {
|
|||||||
|
|
||||||
show_error_summary() {
|
show_error_summary() {
|
||||||
local days="${1:-7}"
|
local days="${1:-7}"
|
||||||
|
|
||||||
log_info "Error summary for the last $days days"
|
log_info "Error summary for the last $days days"
|
||||||
echo
|
echo
|
||||||
|
|
||||||
local error_file="/tmp/backup_errors_$$.tmp"
|
|
||||||
|
|
||||||
for tag in "${BACKUP_TAGS[@]}"; do
|
for tag in "${BACKUP_TAGS[@]}"; do
|
||||||
local error_count=$(sudo journalctl --since "${days} days ago" -t "$tag" --grep="ERROR" --output=cat | wc -l)
|
local error_count
|
||||||
|
error_count=$(sudo journalctl --since "${days} days ago" -t "$tag" --grep="ERROR" --output=cat | wc -l)
|
||||||
if [ "$error_count" -gt 0 ]; then
|
if [ "$error_count" -gt 0 ]; then
|
||||||
echo -e "${RED}$tag: $error_count errors${NC}"
|
echo -e "${RED}$tag: $error_count errors${NC}"
|
||||||
sudo journalctl --since "${days} days ago" -t "$tag" --grep="ERROR" --output=short-iso | head -5
|
sudo journalctl --since "${days} days ago" -t "$tag" --grep="ERROR" --output=short-iso | head -5
|
||||||
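The error summary counts ERROR lines per syslog identifier with `journalctl --grep`. A trimmed, standalone version of that loop (tag names assumed; `--grep` needs a journalctl built with pcre2, and reading other units' logs may require sudo):

```bash
#!/bin/bash
BACKUP_TAGS=("plex-backup" "immich-backup" "docker-backup")  # assumed tag names
days=7

for tag in "${BACKUP_TAGS[@]}"; do
    error_count=$(journalctl --since "${days} days ago" -t "$tag" --grep="ERROR" --output=cat | wc -l)
    if [ "$error_count" -gt 0 ]; then
        echo "$tag: $error_count errors in the last $days days"
    fi
done
```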
@@ -128,36 +127,40 @@ show_error_summary() {
|
|||||||
|
|
||||||
generate_backup_report() {
|
generate_backup_report() {
|
||||||
local days="${1:-7}"
|
local days="${1:-7}"
|
||||||
local report_file="$REPORT_DIR/backup-report-$(date +%Y%m%d_%H%M%S).txt"
|
local report_file
|
||||||
|
report_file="$REPORT_DIR/backup-report-$(date +%Y%m%d_%H%M%S).txt"
|
||||||
|
|
||||||
log_info "Generating comprehensive backup report for the last $days days"
|
log_info "Generating comprehensive backup report for the last $days days"
|
||||||
log_info "Report will be saved to: $report_file"
|
log_info "Report will be saved to: $report_file"
|
||||||
|
|
||||||
{
|
{
|
||||||
echo "=== BACKUP SYSTEM REPORT ==="
|
echo "=== BACKUP SYSTEM REPORT ==="
|
||||||
echo "Generated: $(date)"
|
echo "Generated: $(date)"
|
||||||
echo "Period: Last $days days"
|
echo "Period: Last $days days"
|
||||||
echo "System: $(uname -n)"
|
echo "System: $(uname -n)"
|
||||||
echo
|
echo
|
||||||
|
|
||||||
for tag in "${BACKUP_TAGS[@]}"; do
|
for tag in "${BACKUP_TAGS[@]}"; do
|
||||||
echo "=== $tag ==="
|
echo "=== $tag ==="
|
||||||
|
|
||||||
# Count entries
|
# Count entries
|
||||||
local total_entries=$(sudo journalctl --since "${days} days ago" -t "$tag" --output=cat | wc -l)
|
local total_entries
|
||||||
local error_count=$(sudo journalctl --since "${days} days ago" -t "$tag" --grep="ERROR" --output=cat | wc -l)
|
local error_count
|
||||||
local success_count=$(sudo journalctl --since "${days} days ago" -t "$tag" --grep="SUCCESS" --output=cat | wc -l)
|
local success_count
|
||||||
|
total_entries=$(sudo journalctl --since "${days} days ago" -t "$tag" --output=cat | wc -l)
|
||||||
|
error_count=$(sudo journalctl --since "${days} days ago" -t "$tag" --grep="ERROR" --output=cat | wc -l)
|
||||||
|
success_count=$(sudo journalctl --since "${days} days ago" -t "$tag" --grep="SUCCESS" --output=cat | wc -l)
|
||||||
|
|
||||||
echo "Total log entries: $total_entries"
|
echo "Total log entries: $total_entries"
|
||||||
echo "Errors: $error_count"
|
echo "Errors: $error_count"
|
||||||
echo "Successes: $success_count"
|
echo "Successes: $success_count"
|
||||||
|
|
||||||
if [ "$error_count" -gt 0 ]; then
|
if [ "$error_count" -gt 0 ]; then
|
||||||
echo
|
echo
|
||||||
echo "Recent errors:"
|
echo "Recent errors:"
|
||||||
sudo journalctl --since "${days} days ago" -t "$tag" --grep="ERROR" --output=short-iso | head -10
|
sudo journalctl --since "${days} days ago" -t "$tag" --grep="ERROR" --output=short-iso | head -10
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo
|
echo
|
||||||
echo "Recent activity:"
|
echo "Recent activity:"
|
||||||
sudo journalctl --since "${days} days ago" -t "$tag" --output=short-iso | tail -5
|
sudo journalctl --since "${days} days ago" -t "$tag" --output=short-iso | tail -5
|
||||||
@@ -165,7 +168,7 @@ generate_backup_report() {
|
|||||||
echo "----------------------------------------"
|
echo "----------------------------------------"
|
||||||
echo
|
echo
|
||||||
done
|
done
|
||||||
|
|
||||||
# System resource usage during backups
|
# System resource usage during backups
|
||||||
echo "=== SYSTEM ANALYSIS ==="
|
echo "=== SYSTEM ANALYSIS ==="
|
||||||
echo "Disk usage in backup directories:"
|
echo "Disk usage in backup directories:"
|
||||||
@@ -173,16 +176,16 @@ generate_backup_report() {
|
|||||||
du -sh /mnt/share/media/backups/* 2>/dev/null || echo "No backup directories found"
|
du -sh /mnt/share/media/backups/* 2>/dev/null || echo "No backup directories found"
|
||||||
fi
|
fi
|
||||||
echo
|
echo
|
||||||
|
|
||||||
# Cron job status
|
# Cron job status
|
||||||
echo "Active cron jobs related to backups:"
|
echo "Active cron jobs related to backups:"
|
||||||
sudo crontab -l 2>/dev/null | grep -E "(backup|plex|immich)" || echo "No backup-related cron jobs found"
|
sudo crontab -l 2>/dev/null | grep -E "(backup|plex|immich)" || echo "No backup-related cron jobs found"
|
||||||
echo
|
echo
|
||||||
|
|
||||||
} > "$report_file"
|
} > "$report_file"
|
||||||
|
|
||||||
log_success "Report generated: $report_file"
|
log_success "Report generated: $report_file"
|
||||||
|
|
||||||
# Show summary
|
# Show summary
|
||||||
echo
|
echo
|
||||||
log_info "Report Summary:"
|
log_info "Report Summary:"
|
||||||
@@ -198,10 +201,10 @@ generate_backup_report() {
|
|||||||
check_backup_health() {
|
check_backup_health() {
|
||||||
log_info "Checking backup system health for $HOSTNAME"
|
log_info "Checking backup system health for $HOSTNAME"
|
||||||
echo
|
echo
|
||||||
|
|
||||||
local health_score=100
|
local health_score=100
|
||||||
local issues=()
|
local issues=()
|
||||||
|
|
||||||
# Check if backup scripts exist
|
# Check if backup scripts exist
|
||||||
local backup_scripts=(
|
local backup_scripts=(
|
||||||
"/home/acedanger/shell/backup-plex.sh"
|
"/home/acedanger/shell/backup-plex.sh"
|
||||||
@@ -209,7 +212,7 @@ check_backup_health() {
|
|||||||
"/home/acedanger/shell/validate-plex-backups.sh"
|
"/home/acedanger/shell/validate-plex-backups.sh"
|
||||||
"/home/acedanger/shell/crontab/crontab-backup-system.sh"
|
"/home/acedanger/shell/crontab/crontab-backup-system.sh"
|
||||||
)
|
)
|
||||||
|
|
||||||
for script in "${backup_scripts[@]}"; do
|
for script in "${backup_scripts[@]}"; do
|
||||||
if [ ! -f "$script" ]; then
|
if [ ! -f "$script" ]; then
|
||||||
issues+=("Missing script: $script")
|
issues+=("Missing script: $script")
|
||||||
@@ -219,28 +222,28 @@ check_backup_health() {
|
|||||||
((health_score -= 10))
|
((health_score -= 10))
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
# Check if backup directories exist
|
# Check if backup directories exist
|
||||||
local backup_dirs=(
|
local backup_dirs=(
|
||||||
"/mnt/share/media/backups/plex"
|
"/mnt/share/media/backups/plex"
|
||||||
"/mnt/share/media/backups/docker-data"
|
"/mnt/share/media/backups/docker-data"
|
||||||
"/mnt/share/media/backups/immich"
|
"/mnt/share/media/backups/immich"
|
||||||
)
|
)
|
||||||
|
|
||||||
for dir in "${backup_dirs[@]}"; do
|
for dir in "${backup_dirs[@]}"; do
|
||||||
if [ ! -d "$dir" ]; then
|
if [ ! -d "$dir" ]; then
|
||||||
issues+=("Missing backup directory: $dir")
|
issues+=("Missing backup directory: $dir")
|
||||||
((health_score -= 15))
|
((health_score -= 15))
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
# Check crontab backup system structure
|
# Check crontab backup system structure
|
||||||
local crontab_backup_dir="$SCRIPT_DIR/crontab-backups/$HOSTNAME"
|
local crontab_backup_dir="$SCRIPT_DIR/crontab-backups/$HOSTNAME"
|
||||||
if [ ! -d "$crontab_backup_dir" ]; then
|
if [ ! -d "$crontab_backup_dir" ]; then
|
||||||
issues+=("Missing crontab backup directory for $HOSTNAME: $crontab_backup_dir")
|
issues+=("Missing crontab backup directory for $HOSTNAME: $crontab_backup_dir")
|
||||||
((health_score -= 10))
|
((health_score -= 10))
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Check recent backup activity
|
# Check recent backup activity
|
||||||
local recent_activity=false
|
local recent_activity=false
|
||||||
for tag in "${BACKUP_TAGS[@]}"; do
|
for tag in "${BACKUP_TAGS[@]}"; do
|
||||||
@@ -249,29 +252,30 @@ check_backup_health() {
|
|||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
if [ "$recent_activity" = false ]; then
|
if [ "$recent_activity" = false ]; then
|
||||||
issues+=("No backup activity in the last 24 hours")
|
issues+=("No backup activity in the last 24 hours")
|
||||||
((health_score -= 25))
|
((health_score -= 25))
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Check for recent errors
|
# Check for recent errors
|
||||||
local recent_errors=0
|
local recent_errors=0
|
||||||
for tag in "${BACKUP_TAGS[@]}"; do
|
for tag in "${BACKUP_TAGS[@]}"; do
|
||||||
local error_count=$(sudo journalctl --since "24 hours ago" -t "$tag" --grep="ERROR" --output=cat | wc -l)
|
local error_count
|
||||||
|
error_count=$(sudo journalctl --since "24 hours ago" -t "$tag" --grep="ERROR" --output=cat | wc -l)
|
||||||
((recent_errors += error_count))
|
((recent_errors += error_count))
|
||||||
done
|
done
|
||||||
|
|
||||||
if [ "$recent_errors" -gt 0 ]; then
|
if [ "$recent_errors" -gt 0 ]; then
|
||||||
issues+=("$recent_errors errors in the last 24 hours")
|
issues+=("$recent_errors errors in the last 24 hours")
|
||||||
((health_score -= $((recent_errors * 5))))
|
((health_score -= $((recent_errors * 5))))
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Ensure health score doesn't go below 0
|
# Ensure health score doesn't go below 0
|
||||||
if [ "$health_score" -lt 0 ]; then
|
if [ "$health_score" -lt 0 ]; then
|
||||||
health_score=0
|
health_score=0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Display results
|
# Display results
if [ "$health_score" -ge 90 ]; then
echo -e "${GREEN}Backup System Health ($HOSTNAME): ${health_score}% - EXCELLENT${NC}"
@@ -282,14 +286,14 @@ check_backup_health() {
else
echo -e "${RED}Backup System Health ($HOSTNAME): ${health_score}% - POOR${NC}"
fi

if [ ${#issues[@]} -gt 0 ]; then
echo
log_warning "Issues found:"
for issue in "${issues[@]}"; do
echo -e " ${RED}• $issue${NC}"
done

echo
log_info "Recommended actions:"
echo " • Run: ./manage-enhanced-crontab.sh verify"
@@ -302,22 +306,27 @@ check_backup_health() {
show_service_status() {
log_info "Backup Service Status Overview"
echo

printf "%-20s %-15s %-20s %-30s\n" "Service" "Status" "Last Activity" "Last Message"
printf "%-20s %-15s %-20s %-30s\n" "-------" "------" "-------------" "------------"

for tag in "${BACKUP_TAGS[@]}"; do
local last_entry=$(sudo journalctl -t "$tag" --output=short-iso -n 1 2>/dev/null | tail -1)
local last_entry
last_entry=$(sudo journalctl -t "$tag" --output=short-iso -n 1 2>/dev/null | tail -1)

if [ -n "$last_entry" ]; then
local timestamp=$(echo "$last_entry" | cut -d' ' -f1-2)
local timestamp
local message=$(echo "$last_entry" | cut -d' ' -f4- | cut -c1-30)
local message
local entry_time
local current_time
timestamp=$(echo "$last_entry" | cut -d' ' -f1-2)
message=$(echo "$last_entry" | cut -d' ' -f4- | cut -c1-30)

# Check if it's recent (within 48 hours)
local entry_time=$(date -d "$timestamp" +%s 2>/dev/null || echo "0")
entry_time=$(date -d "$timestamp" +%s 2>/dev/null || echo "0")
local current_time=$(date +%s)
current_time=$(date +%s)
local hours_diff=$(( (current_time - entry_time) / 3600 ))

local status
if [ "$hours_diff" -le 24 ]; then
status="${GREEN}Active${NC}"
@@ -326,7 +335,7 @@ show_service_status() {
else
status="${RED}Stale${NC}"
fi

printf "%-20s %-25s %-20s %-30s\n" "$tag" "$status" "$timestamp" "$message"
else
printf "%-20s %-25s %-20s %-30s\n" "$tag" "${RED}No logs${NC}" "Never" "No activity found"
257
backup-media.sh
@@ -11,10 +11,6 @@ CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Performance tracking variables
SCRIPT_START_TIME=$(date +%s)
BACKUP_START_TIME=""
VERIFICATION_START_TIME=""

# Configuration
MAX_BACKUP_AGE_DAYS=30
MAX_BACKUPS_TO_KEEP=10
@@ -132,71 +128,43 @@ declare -A BACKUP_DESTINATIONS=(
["jellyseerr_settings"]="${BACKUP_ROOT}/jellyseerr/backup_$(date +%Y%m%d)/"
)

# Show help function
show_help() {
cat << EOF
Media Services Backup Script

Usage: $0 [OPTIONS]

OPTIONS:
--dry-run Show what would be backed up without actually doing it
--no-verify Skip backup verification
--sequential Run backups sequentially instead of in parallel
--interactive Ask for confirmation before each backup
--webhook URL Custom webhook URL for notifications
-h, --help Show this help message

EXAMPLES:
$0 # Run full backup with default settings
$0 --dry-run # Preview what would be backed up
$0 --sequential # Run backups one at a time
$0 --no-verify # Skip verification for faster backup

SERVICES BACKED UP:
- Sonarr (TV Shows)
- Radarr (Movies)
- Prowlarr (Indexers)
- Audiobookshelf (Audiobooks)
- Tautulli (Plex Statistics)
- SABnzbd (Downloads)
- Jellyseerr (Requests)

EOF
}

# Logging functions
log_message() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${CYAN}[${timestamp}]${NC} ${message}"
echo "[${timestamp}] $message" >> "${LOG_FILE}" 2>/dev/null || true
}

log_error() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}" >&2
echo "[${timestamp}] ERROR: $message" >> "${LOG_FILE}" 2>/dev/null || true
}

log_success() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}"
echo "[${timestamp}] SUCCESS: $message" >> "${LOG_FILE}" 2>/dev/null || true
}

log_warning() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}"
echo "[${timestamp}] WARNING: $message" >> "${LOG_FILE}" 2>/dev/null || true
}

log_info() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local timestamp
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "${BLUE}[${timestamp}] INFO:${NC} ${message}"
echo "[${timestamp}] INFO: $message" >> "${LOG_FILE}" 2>/dev/null || true
}
@@ -206,19 +174,20 @@ track_performance() {
if [ "$PERFORMANCE_MONITORING" != true ]; then
return 0
fi

local operation="$1"
local start_time="$2"
local end_time="${3:-$(date +%s)}"
local duration=$((end_time - start_time))

# Initialize performance log if it doesn't exist
if [ ! -f "$PERFORMANCE_LOG_FILE" ]; then
echo "[]" > "$PERFORMANCE_LOG_FILE"
fi

# Add performance entry with lock protection
local entry=$(jq -n \
local entry
entry=$(jq -n \
--arg timestamp "$(date -Iseconds)" \
--arg operation "$operation" \
--arg duration "$duration" \
@@ -229,12 +198,12 @@ track_performance() {
duration: ($duration | tonumber),
hostname: $hostname
}')

if command -v jq > /dev/null 2>&1; then
local lock_file="${PERFORMANCE_LOG_FILE}.lock"
local max_wait=10
local wait_count=0

while [ $wait_count -lt $max_wait ]; do
if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
break
@@ -242,7 +211,7 @@ track_performance() {
sleep 0.1
((wait_count++))
done

if [ $wait_count -lt $max_wait ]; then
if jq --argjson entry "$entry" '. += [$entry]' "$PERFORMANCE_LOG_FILE" > "${PERFORMANCE_LOG_FILE}.tmp" 2>/dev/null; then
mv "${PERFORMANCE_LOG_FILE}.tmp" "$PERFORMANCE_LOG_FILE"
@@ -252,7 +221,7 @@ track_performance() {
rm -f "$lock_file"
fi
fi

log_info "Performance: $operation completed in ${duration}s"
}

@@ -272,7 +241,7 @@ log_file_details() {
local status="$4"
local size=""
local checksum=""

# Calculate size if backup was successful
if [ "$status" == "SUCCESS" ] && [ -e "$dest" ]; then
size=$(du -sh "$dest" 2>/dev/null | cut -f1 || echo "Unknown")
@@ -283,12 +252,12 @@ log_file_details() {
size="N/A"
checksum="N/A"
fi

# Use a lock file for markdown log to prevent race conditions
local markdown_lock="${MARKDOWN_LOG}.lock"
local max_wait=30
local wait_count=0

while [ $wait_count -lt $max_wait ]; do
if (set -C; echo $$ > "$markdown_lock") 2>/dev/null; then
break
@@ -296,25 +265,25 @@ log_file_details() {
sleep 0.1
((wait_count++))
done

if [ $wait_count -lt $max_wait ]; then
# Log to markdown file safely
{
echo "## $service Backup"
echo "- **Status**: $status"
echo "- **Source**: \`$src\`"
echo "- **Source**: \$($src\)"
echo "- **Destination**: \`$dest\`"
echo "- **Destination**: \$($dest\)"
echo "- **Size**: $size"
echo "- **Checksum**: $checksum"
echo "- **Timestamp**: $(date '+%Y-%m-%d %H:%M:%S')"
echo ""
} >> "$MARKDOWN_LOG"

rm -f "$markdown_lock"
else
log_warning "Could not acquire markdown log lock for $service"
fi

# Log to JSON
if command -v jq > /dev/null 2>&1; then
update_backup_log "$service" "$src" "$dest" "$status" "$size" "$checksum"
@@ -329,17 +298,18 @@ update_backup_log() {
local status="$4"
local size="$5"
local checksum="$6"
local timestamp=$(date -Iseconds)
local timestamp
timestamp=$(date -Iseconds)

if ! command -v jq > /dev/null 2>&1; then
return 0
fi

# Use a lock file for parallel safety
local lock_file="${JSON_LOG_FILE}.lock"
local max_wait=30
local wait_count=0

while [ $wait_count -lt $max_wait ]; do
if (set -C; echo $$ > "$lock_file") 2>/dev/null; then
break
@@ -347,14 +317,15 @@ update_backup_log() {
sleep 0.1
((wait_count++))
done

if [ $wait_count -ge $max_wait ]; then
log_warning "Could not acquire lock for JSON log update"
return 1
fi

# Create entry for this backup
local entry=$(jq -n \
local entry
entry=$(jq -n \
--arg service "$service" \
--arg src "$src" \
--arg dest "$dest" \
@@ -371,7 +342,7 @@ update_backup_log() {
checksum: $checksum,
timestamp: $timestamp
}')

# Update JSON log safely
if jq --argjson entry "$entry" --arg service "$service" \
'.[$service] = $entry' "$JSON_LOG_FILE" > "${JSON_LOG_FILE}.tmp" 2>/dev/null; then
@@ -379,7 +350,7 @@ update_backup_log() {
else
rm -f "${JSON_LOG_FILE}.tmp"
fi

# Remove lock file
rm -f "$lock_file"
}
@@ -387,12 +358,12 @@ update_backup_log() {
# Check if Docker container is running
check_container_running() {
local container="$1"

if ! docker ps --format "table {{.Names}}" | grep -q "^${container}$"; then
log_warning "Container '$container' is not running"
return 1
fi

return 0
}

@@ -401,18 +372,20 @@ verify_backup() {
local src_container="$1"
local src_path="$2"
local dest_path="$3"

if [ "$VERIFY_BACKUPS" != true ]; then
return 0
fi

log_info "Verifying backup integrity for $src_container:$src_path"

# For files, compare checksums
if [[ "$src_path" == *.ini ]] || [[ "$src_path" == *.json ]]; then
local src_checksum=$(docker exec "$src_container" md5sum "$src_path" 2>/dev/null | cut -d' ' -f1 || echo "")
local src_checksum
local dest_checksum=$(md5sum "$dest_path" 2>/dev/null | cut -d' ' -f1 || echo "")
local dest_checksum
src_checksum=$(docker exec "$src_container" md5sum "$src_path" 2>/dev/null | cut -d' ' -f1 || echo "")
dest_checksum=$(md5sum "$dest_path" 2>/dev/null | cut -d' ' -f1 || echo "")

if [ -n "$src_checksum" ] && [ -n "$dest_checksum" ] && [ "$src_checksum" == "$dest_checksum" ]; then
log_success "Backup verification passed for $src_container:$src_path"
return 0
@@ -421,10 +394,11 @@ verify_backup() {
return 1
fi
fi

# For directories, check if they exist and have content
if [ -d "$dest_path" ]; then
local file_count=$(find "$dest_path" -type f 2>/dev/null | wc -l)
local file_count
file_count=$(find "$dest_path" -type f 2>/dev/null | wc -l)
if [ "$file_count" -gt 0 ]; then
log_success "Backup verification passed for $src_container:$src_path ($file_count files)"
return 0
@@ -433,7 +407,7 @@ verify_backup() {
return 1
fi
fi

log_warning "Unable to verify backup for $src_container:$src_path"
return 0
}
@@ -442,38 +416,39 @@ verify_backup() {
backup_service() {
local service="$1"
local container="$1"
local backup_start_time=$(date +%s)
local backup_start_time
backup_start_time=$(date +%s)

log_message "Starting backup for service: $service"

# Handle special cases for container names
case "$service" in
jellyseerr_db|jellyseerr_settings)
container="jellyseerr"
;;
esac

# Check if container is running
if ! check_container_running "$container"; then
log_file_details "$service" "${container}:${MEDIA_SERVICES[$service]}" "${BACKUP_DESTINATIONS[$service]}" "FAILED - Container not running"
return 1
fi

local src_path="${MEDIA_SERVICES[$service]}"
local dest_path="${BACKUP_DESTINATIONS[$service]}"

# Create destination directory for jellyseerr
if [[ "$service" == jellyseerr_* ]]; then
mkdir -p "$(dirname "$dest_path")"
fi

# Perform the backup
if [ "$DRY_RUN" == true ]; then
log_info "DRY RUN: Would backup $container:$src_path to $dest_path"
log_file_details "$service" "$container:$src_path" "$dest_path" "DRY RUN"
return 0
fi

if [ "$INTERACTIVE_MODE" == true ]; then
echo -n "Backup $service? (y/N): "
read -r response
@@ -482,14 +457,14 @@ backup_service() {
return 0
fi
fi

# Execute docker cp command
local docker_cmd="docker cp $container:$src_path $dest_path"
log_info "Executing: $docker_cmd"

if $docker_cmd 2>&1 | tee -a "$LOG_FILE"; then
log_success "Backup completed for $service"

# Verify the backup
if verify_backup "$container" "$src_path" "$dest_path"; then
log_file_details "$service" "$container:$src_path" "$dest_path" "SUCCESS"
@@ -510,7 +485,7 @@ backup_service() {
backup_service_wrapper() {
local service="$1"
local temp_file="$2"

if backup_service "$service"; then
echo "SUCCESS:$service" >> "$temp_file"
else
@@ -521,45 +496,47 @@ backup_service_wrapper() {
# Clean old backups based on age and count
cleanup_old_backups() {
log_message "Cleaning up old backups..."

for service_dir in "${BACKUP_ROOT}"/*; do
if [ ! -d "$service_dir" ]; then
continue
fi

local service=$(basename "$service_dir")
local service
service=$(basename "$service_dir")
log_info "Cleaning up old backups for $service"

# Remove backups older than MAX_BACKUP_AGE_DAYS
find "$service_dir" -type f -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
find "$service_dir" -type d -empty -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true

# Keep only the most recent MAX_BACKUPS_TO_KEEP backups
find "$service_dir" -type f -name "*.ini" -o -name "*.json" | sort -r | tail -n +$((MAX_BACKUPS_TO_KEEP + 1)) | xargs rm -f 2>/dev/null || true

# Clean up old dated directories (for jellyseerr)
find "$service_dir" -type d -name "backup_*" | sort -r | tail -n +$((MAX_BACKUPS_TO_KEEP + 1)) | xargs rm -rf 2>/dev/null || true
done

# Clean up old log files
find "$LOG_ROOT" -name "media-backup-*.log" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
find "$LOG_ROOT" -name "media-backup-*.md" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true

log_success "Cleanup completed"
}

# Check disk space
check_disk_space() {
local required_space_mb=1000 # Minimum 1GB free space

local available_space_kb=$(df "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
local available_space_kb
available_space_kb=$(df "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
local available_space_mb=$((available_space_kb / 1024))

if [ "$available_space_mb" -lt "$required_space_mb" ]; then
log_error "Insufficient disk space. Available: ${available_space_mb}MB, Required: ${required_space_mb}MB"
return 1
fi

log_info "Disk space check passed. Available: ${available_space_mb}MB"
return 0
}
@@ -569,14 +546,15 @@ send_notification() {
local title="$1"
local message="$2"
local status="${3:-info}"
local hostname=$(hostname)
local hostname
hostname=$(hostname)
local total_services=${#MEDIA_SERVICES[@]}
local success_count="$4"
local failed_count="$5"

# Enhanced message with statistics
local enhanced_message="$message\n\nServices: $total_services\nSuccessful: $success_count\nFailed: $failed_count\nHost: $hostname"

# Console notification
case "$status" in
"success") log_success "$title: $enhanced_message" ;;
@@ -584,12 +562,12 @@ send_notification() {
"warning") log_warning "$title: $enhanced_message" ;;
*) log_info "$title: $enhanced_message" ;;
esac

# Webhook notification
if [ -n "$WEBHOOK_URL" ] && [ "$DRY_RUN" != true ]; then
local tags="backup,media,${hostname}"
[ "$failed_count" -gt 0 ] && tags="${tags},errors"

curl -s \
-H "tags:${tags}" \
-d "$enhanced_message" \
@@ -602,7 +580,7 @@ generate_summary_report() {
local success_count="$1"
local failed_count="$2"
local total_time="$3"

log_message "=== BACKUP SUMMARY REPORT ==="
log_message "Total Services: ${#MEDIA_SERVICES[@]}"
log_message "Successful Backups: $success_count"
@@ -610,11 +588,11 @@ generate_summary_report() {
log_message "Total Time: ${total_time}s"
log_message "Log File: $LOG_FILE"
log_message "Markdown Report: $MARKDOWN_LOG"

if [ "$PERFORMANCE_MONITORING" == true ]; then
log_message "Performance Log: $PERFORMANCE_LOG_FILE"
fi

# Add summary to markdown log
{
echo "# Media Backup Summary Report"
@@ -630,18 +608,19 @@ generate_summary_report() {

# Main backup execution function
main() {
local script_start_time=$(date +%s)
local script_start_time
script_start_time=$(date +%s)

log_message "=== MEDIA SERVICES BACKUP STARTED ==="
log_message "Host: $(hostname)"
log_message "Timestamp: $(date '+%Y-%m-%d %H:%M:%S')"
log_message "Dry Run: $DRY_RUN"
log_message "Parallel Mode: $PARALLEL_BACKUPS"
log_message "Verify Backups: $VERIFY_BACKUPS"

# Initialize logging
initialize_json_log

# Initialize markdown log
{
echo "# Media Services Backup Report"
@@ -649,44 +628,45 @@ main() {
echo "**Host**: $(hostname)"
echo ""
} > "$MARKDOWN_LOG"

# Pre-flight checks
if ! check_disk_space; then
send_notification "Media Backup Failed" "Insufficient disk space" "error" 0 1
exit 1
fi

# Check if Docker is running
if ! docker info >/dev/null 2>&1; then
log_error "Docker is not running or accessible"
send_notification "Media Backup Failed" "Docker is not accessible" "error" 0 1
exit 1
fi

local success_count=0
local failed_count=0
local backup_results=()

if [ "$PARALLEL_BACKUPS" == true ]; then
log_message "Running backups in parallel mode"

# Create temporary file for collecting results
local temp_results=$(mktemp)
local temp_results
temp_results=$(mktemp)
local pids=()

# Start backup jobs in parallel
for service in "${!MEDIA_SERVICES[@]}"; do
backup_service_wrapper "$service" "$temp_results" &
pids+=($!)
log_info "Started backup job for $service (PID: $!)"
done

# Wait for all jobs to complete
for pid in "${pids[@]}"; do
wait "$pid"
log_info "Backup job completed (PID: $pid)"
done

# Collect results
while IFS= read -r result; do
if [[ "$result" == SUCCESS:* ]]; then
@@ -697,12 +677,12 @@ main() {
backup_results+=("✗ ${result#FAILED:}")
fi
done < "$temp_results"

rm -f "$temp_results"

else
log_message "Running backups in sequential mode"

# Run backups sequentially
for service in "${!MEDIA_SERVICES[@]}"; do
if backup_service "$service"; then
@@ -714,22 +694,23 @@ main() {
fi
done
fi

# Calculate total time
local script_end_time=$(date +%s)
local script_end_time
script_end_time=$(date +%s)
local total_time=$((script_end_time - script_start_time))

# Track overall performance
track_performance "full_media_backup" "$script_start_time" "$script_end_time"

# Clean up old backups (only if not dry run)
if [ "$DRY_RUN" != true ]; then
cleanup_old_backups
fi

# Generate summary report
generate_summary_report "$success_count" "$failed_count" "$total_time"

# Add results to markdown log
{
echo "## Backup Results"
@@ -740,28 +721,28 @@ main() {
echo "**Completed**: $(date '+%Y-%m-%d %H:%M:%S')"
echo "**Duration**: ${total_time}s"
} >> "$MARKDOWN_LOG"

# Send notification
local status="success"
local message="Media backup completed"

if [ "$failed_count" -gt 0 ]; then
status="warning"
message="Media backup completed with $failed_count failures"
fi

if [ "$DRY_RUN" == true ]; then
message="Media backup dry run completed"
status="info"
fi

send_notification "Media Backup Complete" "$message" "$status" "$success_count" "$failed_count"

# Exit with error code if any backups failed
if [ "$failed_count" -gt 0 ]; then
exit 1
fi

log_success "All media backups completed successfully!"
exit 0
}
@@ -115,7 +115,7 @@ show_status() {
# Check deployment config
if [ -d "$HOME/.docker-deployment" ]; then
echo -e "${GREEN}✅ Deployment configuration: Ready${NC}"
local servers=$(ls "$HOME/.docker-deployment/servers/"*.yml 2>/dev/null | wc -l)
local servers=$(find . -maxdepth 1 -type f | wc -l)
echo " Configured servers: $servers"
else
echo -e "${RED}❌ Deployment configuration: Not initialized${NC}"
224
docker-deployment/deployment-env-integration.sh.sc2012_backup
Executable file
@@ -0,0 +1,224 @@
#!/bin/bash

# deployment-env-integration.sh - Integrate deployment manager with existing env backup system
# Author: Shell Repository
# Description: Bridge between docker-deployment-manager and backup-env-files system

set -e

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEPLOYMENT_MANAGER="$SCRIPT_DIR/docker-deployment-manager.sh"
ENV_BACKUP_SCRIPT="$SCRIPT_DIR/backup-env-files.sh"
STACK_HELPER="$SCRIPT_DIR/stack-assignment-helper.sh"

echo -e "${BLUE}=== Docker Deployment & Environment Backup Integration ===${NC}"
echo ""

# Check if required scripts exist
check_dependencies() {
local missing=()

[ ! -f "$DEPLOYMENT_MANAGER" ] && missing+=("docker-deployment-manager.sh")
[ ! -f "$ENV_BACKUP_SCRIPT" ] && missing+=("backup-env-files.sh")
[ ! -f "$STACK_HELPER" ] && missing+=("stack-assignment-helper.sh")

if [ ${#missing[@]} -gt 0 ]; then
echo -e "${RED}Missing required scripts:${NC}"
printf ' - %s\n' "${missing[@]}"
exit 1
fi
}

# Setup integration
setup_integration() {
echo -e "${YELLOW}Setting up deployment and backup integration...${NC}"

# Initialize deployment configuration
if [ ! -d "$HOME/.docker-deployment" ]; then
echo "1. Initializing deployment configuration..."
"$DEPLOYMENT_MANAGER" init
else
echo -e "${GREEN}✓ Deployment configuration already exists${NC}"
fi

# Initialize environment backup if not already done
if [ ! -d "$HOME/.env-backup" ]; then
echo ""
echo "2. Environment backup system needs initialization."
echo " Run: $ENV_BACKUP_SCRIPT --init"
echo " This will set up secure backup of your .env files to Gitea."
else
echo -e "${GREEN}✓ Environment backup already configured${NC}"
fi

# Analyze current stacks
echo ""
echo "3. Analyzing current Docker stacks..."
"$STACK_HELPER" analyze

echo ""
echo -e "${GREEN}✓ Integration setup completed!${NC}"
}

# Show workflow suggestions
show_workflow() {
echo -e "${BLUE}=== Recommended Workflow ===${NC}"
echo ""

echo -e "${YELLOW}📋 Daily Operations:${NC}"
echo "1. Make changes to Docker stacks in your monorepo"
echo "2. Test locally before deployment"
echo "3. Backup environment files: $ENV_BACKUP_SCRIPT"
echo "4. Deploy to specific server: $DEPLOYMENT_MANAGER deploy <server>"
echo "5. Verify deployment: $DEPLOYMENT_MANAGER status <server>"
echo ""

echo -e "${YELLOW}🔄 Bulk Operations:${NC}"
echo "1. Deploy all stacks: $DEPLOYMENT_MANAGER deploy-all --dry-run"
echo "2. Check what goes where: $DEPLOYMENT_MANAGER map"
echo "3. Sync just environments: $DEPLOYMENT_MANAGER sync-env <server>"
echo ""

echo -e "${YELLOW}📊 Analysis & Planning:${NC}"
echo "1. Analyze stack assignments: $STACK_HELPER analyze"
echo "2. Check resource usage: $STACK_HELPER resources"
echo "3. Get optimization tips: $STACK_HELPER optimize"
echo "4. Generate new configs: $STACK_HELPER generate"
echo ""

echo -e "${YELLOW}🔧 Automation Integration:${NC}"
echo "These commands can be integrated into your existing crontab system:"
echo ""
echo "# Daily environment backup (already in crontab)"
echo "0 3 * * * $ENV_BACKUP_SCRIPT"
echo ""
echo "# Weekly deployment validation"
echo "0 4 * * 0 $DEPLOYMENT_MANAGER deploy-all --dry-run"
echo ""
echo "# Monthly stack analysis"
echo "0 5 1 * * $STACK_HELPER all > /home/acedanger/shell/logs/stack-analysis.log"
}

# Show current status
show_status() {
echo -e "${BLUE}=== Current System Status ===${NC}"
echo ""

# Check deployment config
if [ -d "$HOME/.docker-deployment" ]; then
echo -e "${GREEN}✅ Deployment configuration: Ready${NC}"
local servers=$(ls "$HOME/.docker-deployment/servers/"*.yml 2>/dev/null | wc -l)
echo " Configured servers: $servers"
else
echo -e "${RED}❌ Deployment configuration: Not initialized${NC}"
fi

# Check environment backup
if [ -d "$HOME/.env-backup" ]; then
echo -e "${GREEN}✅ Environment backup: Ready${NC}"
local last_backup=$(stat -c %y "$HOME/.env-backup/.git/HEAD" 2>/dev/null | cut -d' ' -f1 || echo "Never")
echo " Last backup: $last_backup"
else
echo -e "${YELLOW}⚠️ Environment backup: Not initialized${NC}"
fi

# Check Docker stacks
if [ -d "$HOME/docker" ]; then
local stack_count=$(find "$HOME/docker" -maxdepth 1 -type d | wc -l)
stack_count=$((stack_count - 1)) # Exclude the docker directory itself
echo -e "${GREEN}✅ Docker stacks: $stack_count found${NC}"
else
echo -e "${RED}❌ Docker directory: Not found${NC}"
fi

# Check crontab integration
if crontab -l 2>/dev/null | grep -q "backup-env-files.sh"; then
echo -e "${GREEN}✅ Crontab integration: Environment backup scheduled${NC}"
else
echo -e "${YELLOW}⚠️ Crontab integration: No env backup scheduled${NC}"
fi
}

# Test the integration
test_integration() {
echo -e "${BLUE}=== Testing Integration ===${NC}"
echo ""

echo "1. Testing deployment manager..."
if "$DEPLOYMENT_MANAGER" map >/dev/null 2>&1; then
echo -e "${GREEN}✅ Deployment manager: Working${NC}"
else
echo -e "${RED}❌ Deployment manager: Error${NC}"
fi

echo "2. Testing environment backup..."
if "$ENV_BACKUP_SCRIPT" --list >/dev/null 2>&1; then
echo -e "${GREEN}✅ Environment backup: Working${NC}"
else
echo -e "${YELLOW}⚠️ Environment backup: Needs initialization${NC}"
fi

echo "3. Testing stack analysis..."
if "$STACK_HELPER" analyze >/dev/null 2>&1; then
echo -e "${GREEN}✅ Stack analysis: Working${NC}"
else
echo -e "${RED}❌ Stack analysis: Error${NC}"
fi

echo ""
echo -e "${BLUE}Integration test completed.${NC}"
}

# Main function
main() {
check_dependencies

case "${1:-status}" in
setup|--setup|-s)
setup_integration
;;
workflow|--workflow|-w)
show_workflow
;;
status|--status)
show_status
;;
test|--test|-t)
test_integration
;;
all|--all|-a)
show_status
echo ""
setup_integration
echo ""
show_workflow
;;
help|--help|-h)
echo "Usage: $0 [COMMAND]"
echo ""
echo "Commands:"
echo " setup Initialize deployment and backup integration"
echo " workflow Show recommended workflow"
echo " status Show current system status (default)"
echo " test Test integration components"
echo " all Run status, setup, and show workflow"
echo " help Show this help message"
;;
*)
echo -e "${RED}Unknown command: $1${NC}"
echo "Use '$0 help' for usage information"
exit 1
;;
esac
}

# Run main function
main "$@"
632
docker-deployment/docker-deployment-manager.sh.sc2012_backup
Executable file
@@ -0,0 +1,632 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# docker-deployment-manager.sh - Manage Docker stack deployments across multiple servers
|
||||||
|
# Author: Shell Repository
|
||||||
|
# Description: Deploy specific Docker stacks to designated servers while maintaining monorepo structure
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[0;33m'
|
||||||
|
RED='\033[0;31m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
DOCKER_DIR="$HOME/docker"
|
||||||
|
DEPLOYMENT_CONFIG_DIR="$HOME/.docker-deployment"
|
||||||
|
LOG_FILE="$SCRIPT_DIR/logs/deployment.log"
|
||||||
|
|
||||||
|
# Ensure directories exist
|
||||||
|
mkdir -p "$(dirname "$LOG_FILE")"
|
||||||
|
mkdir -p "$DEPLOYMENT_CONFIG_DIR"/{config,servers,stacks,logs}
|
||||||
|
|
||||||
|
# Logging function
|
||||||
|
log() {
|
||||||
|
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "$LOG_FILE"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Display usage information
|
||||||
|
usage() {
|
||||||
|
echo "Usage: $0 [OPTIONS] [COMMAND]"
|
||||||
|
echo ""
|
||||||
|
echo "Manage Docker stack deployments across multiple servers"
|
||||||
|
echo ""
|
||||||
|
echo "Commands:"
|
||||||
|
echo " init Initialize deployment configuration"
|
||||||
|
echo " map Show stack-to-server mapping"
|
||||||
|
echo " deploy SERVER Deploy stacks to specific server"
|
||||||
|
echo " deploy-all Deploy all stacks to their designated servers"
|
||||||
|
echo " status SERVER Check deployment status on server"
|
||||||
|
echo " sync-env SERVER Sync environment files to server"
|
||||||
|
echo " rollback SERVER Rollback to previous deployment"
|
||||||
|
echo ""
|
||||||
|
echo "Options:"
|
||||||
|
echo " -h, --help Show this help message"
|
||||||
|
echo " -d, --dry-run Show what would be deployed without doing it"
|
||||||
|
echo " -f, --force Force deployment even if checks fail"
|
||||||
|
echo " -v, --verbose Verbose output"
|
||||||
|
echo " --config-only Only sync configuration files"
|
||||||
|
echo " --env-only Only sync environment files"
|
||||||
|
echo ""
|
||||||
|
echo "Examples:"
|
||||||
|
echo " $0 init # First time setup"
|
||||||
|
echo " $0 map # See what goes where"
|
||||||
|
echo " $0 deploy europa # Deploy Europa stacks"
|
||||||
|
echo " $0 deploy-all --dry-run # Test full deployment"
|
||||||
|
echo " $0 sync-env io # Sync .env files to IO server"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Initialize deployment configuration
|
||||||
|
init_deployment_config() {
|
||||||
|
echo -e "${YELLOW}Initializing Docker deployment configuration...${NC}"
|
||||||
|
|
||||||
|
# Create main configuration file
|
||||||
|
cat > "$DEPLOYMENT_CONFIG_DIR/config.yml" << 'EOF'
|
||||||
|
# Docker Deployment Manager Configuration
|
||||||
|
# This file defines global settings for stack deployment across servers
|
||||||
|
|
||||||
|
deployment:
|
||||||
|
version: "1.0"
|
||||||
|
docker_dir: "~/docker"
|
||||||
|
backup_before_deploy: true
|
||||||
|
health_check_timeout: 30
|
||||||
|
rollback_on_failure: true
|
||||||
|
|
||||||
|
# Multi-server stacks - these will be deployed to ALL servers
|
||||||
|
multi_server_stacks:
|
||||||
|
- dozzle # Docker log viewer
|
||||||
|
- dockge # Docker compose management
|
||||||
|
- diun # Docker image update notifier
|
||||||
|
|
||||||
|
notifications:
|
||||||
|
enabled: true
|
||||||
|
webhook_url: "https://notify.peterwood.rocks/lab"
|
||||||
|
tags: ["deployment", "docker"]
|
||||||
|
|
||||||
|
logging:
|
||||||
|
level: "info"
|
||||||
|
retain_days: 30
|
||||||
|
|
||||||
|
security:
|
||||||
|
verify_checksums: true
|
||||||
|
backup_env_files: true
|
||||||
|
use_secure_transfer: true
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Create server configurations based on existing crontab analysis
|
||||||
|
cat > "$DEPLOYMENT_CONFIG_DIR/servers/europa.yml" << 'EOF'
|
||||||
|
# Europa Server Configuration - Media Server
|
||||||
|
name: "europa"
|
||||||
|
role: "media-server"
|
||||||
|
description: "Primary media streaming and web services server"
|
||||||
|
|
||||||
|
connection:
|
||||||
|
hostname: "europa"
|
||||||
|
user: "acedanger"
|
||||||
|
ssh_key: "~/.ssh/id_rsa"
|
||||||
|
docker_compose_dir: "~/docker"
|
||||||
|
|
||||||
|
stacks:
|
||||||
|
- plex
|
||||||
|
- jellyfin
|
||||||
|
- traefik
|
||||||
|
- nextcloud
|
||||||
|
- photoprism
|
||||||
|
- immich
|
||||||
|
|
||||||
|
resources:
|
||||||
|
cpu_cores: 4
|
||||||
|
memory_gb: 8
|
||||||
|
storage_gb: 500
|
||||||
|
|
||||||
|
monitoring:
|
||||||
|
health_check_url: "http://europa:8080/health"
|
||||||
|
required_services:
|
||||||
|
- "traefik"
|
||||||
|
- "plex"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
cat > "$DEPLOYMENT_CONFIG_DIR/servers/io.yml" << 'EOF'
|
||||||
|
# IO Server Configuration - Download/Acquisition Server
|
||||||
|
name: "io"
|
||||||
|
role: "download-server"
|
||||||
|
description: "Media acquisition and download management server"
|
||||||
|
|
||||||
|
connection:
|
||||||
|
hostname: "io"
|
||||||
|
user: "acedanger"
|
||||||
|
ssh_key: "~/.ssh/id_rsa"
|
||||||
|
docker_compose_dir: "~/docker"
|
||||||
|
|
||||||
|
stacks:
|
||||||
|
- radarr
|
||||||
|
- sonarr
|
||||||
|
- lidarr
|
||||||
|
- sabnzbd
|
||||||
|
- qbittorrent
|
||||||
|
- prowlarr
|
||||||
|
- overseerr
|
||||||
|
|
||||||
|
resources:
|
||||||
|
cpu_cores: 2
|
||||||
|
memory_gb: 4
|
||||||
|
storage_gb: 200
|
||||||
|
|
||||||
|
monitoring:
|
||||||
|
health_check_url: "http://io:8080/health"
|
||||||
|
required_services:
|
||||||
|
- "sabnzbd"
|
||||||
|
- "radarr"
|
||||||
|
- "sonarr"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
cat > "$DEPLOYMENT_CONFIG_DIR/servers/racknerd.yml" << 'EOF'
|
||||||
|
# Racknerd Server Configuration - Backup Server
|
||||||
|
name: "racknerd"
|
||||||
|
role: "backup-server"
|
||||||
|
description: "Backup, monitoring, and utility services server"
|
||||||
|
|
||||||
|
connection:
|
||||||
|
hostname: "racknerd"
|
||||||
|
user: "acedanger"
|
||||||
|
ssh_key: "~/.ssh/id_rsa"
|
||||||
|
docker_compose_dir: "~/docker"
|
||||||
|
|
||||||
|
stacks:
|
||||||
|
- grafana
|
||||||
|
- prometheus
|
||||||
|
- uptime-kuma
|
||||||
|
- vaultwarden
|
||||||
|
- portainer
|
||||||
|
- watchtower
|
||||||
|
|
||||||
|
resources:
|
||||||
|
cpu_cores: 1
|
||||||
|
memory_gb: 2
|
||||||
|
storage_gb: 100
|
||||||
|
|
||||||
|
monitoring:
|
||||||
|
health_check_url: "http://racknerd:8080/health"
|
||||||
|
required_services:
|
||||||
|
- "uptime-kuma"
|
||||||
|
- "vaultwarden"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Create stack metadata examples
|
||||||
|
if [ -d "$DOCKER_DIR/plex" ]; then
|
||||||
|
cat > "$DEPLOYMENT_CONFIG_DIR/stacks/plex.yml" << 'EOF'
|
||||||
|
# Plex Stack Deployment Configuration
|
||||||
|
name: "plex"
|
||||||
|
description: "Plex Media Server"
|
||||||
|
|
||||||
|
deployment:
|
||||||
|
servers: ["europa"]
|
||||||
|
priority: "high"
|
||||||
|
dependencies: ["traefik"]
|
||||||
|
restart_policy: "unless-stopped"
|
||||||
|
|
||||||
|
health_check:
|
||||||
|
enabled: true
|
||||||
|
url: "http://localhost:32400/web"
|
||||||
|
timeout: 30
|
||||||
|
retries: 3
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
- "/mnt/media:/media:ro"
|
||||||
|
- "/mnt/share/plex-config:/config"
|
||||||
|
|
||||||
|
environment:
|
||||||
|
- "PLEX_UID=1000"
|
||||||
|
- "PLEX_GID=1000"
|
||||||
|
- "TZ=America/New_York"
|
||||||
|
|
||||||
|
backup:
|
||||||
|
enabled: true
|
||||||
|
schedule: "0 2 * * *"
|
||||||
|
retention_days: 7
|
||||||
|
EOF
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo -e "${GREEN}Deployment configuration initialized!${NC}"
|
||||||
|
echo -e "${BLUE}Configuration files created in: $DEPLOYMENT_CONFIG_DIR${NC}"
|
||||||
|
echo ""
|
||||||
|
echo -e "${YELLOW}Next steps:${NC}"
|
||||||
|
echo "1. Review and customize server configurations in $DEPLOYMENT_CONFIG_DIR/servers/"
|
||||||
|
echo "2. Add stack metadata files for your Docker stacks"
|
||||||
|
echo "3. Run '$0 map' to see the current mapping"
|
||||||
|
echo "4. Test with '$0 deploy-all --dry-run'"
|
||||||
|
|
||||||
|
log "Deployment configuration initialized"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Load server configuration
|
||||||
|
load_server_config() {
|
||||||
|
local server="$1"
|
||||||
|
local config_file="$DEPLOYMENT_CONFIG_DIR/servers/${server}.yml"
|
||||||
|
|
||||||
|
if [ ! -f "$config_file" ]; then
|
||||||
|
echo -e "${RED}Error: Server configuration not found for '$server'${NC}"
|
||||||
|
echo "Available servers:"
|
||||||
|
ls "$DEPLOYMENT_CONFIG_DIR/servers/" 2>/dev/null | sed 's/\.yml$//' | sed 's/^/ - /'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# For now, we'll parse YAML manually (could use yq if available)
|
||||||
|
# Extract stacks list from YAML
|
||||||
|
grep -A 50 "stacks:" "$config_file" | grep "^-" | sed 's/^- //' | sed 's/["'\'']//g' | sed 's/#.*//' | sed 's/[[:space:]]*$//'
|
||||||
|
}
|
||||||
|
|
||||||
|
# Load multi-server stacks from config
|
||||||
|
load_multi_server_stacks() {
|
||||||
|
local config_file="$DEPLOYMENT_CONFIG_DIR/config.yml"
|
||||||
|
if [ -f "$config_file" ]; then
|
||||||
|
grep -A 10 "multi_server_stacks:" "$config_file" | grep "^-" | sed 's/^- //' | sed 's/["'\'']//g' | sed 's/#.*//' | sed 's/[[:space:]]*$//'
|
||||||
|
fi
|
||||||
|
}
|
||||||
|

# Show stack-to-server mapping
show_mapping() {
    echo -e "${BLUE}=== Docker Stack to Server Mapping ===${NC}"
    echo ""

    # Show multi-server stacks first
    local multi_server_stacks=$(load_multi_server_stacks)
    if [ -n "$multi_server_stacks" ]; then
        echo -e "${YELLOW}🌐 Multi-Server Stacks (deployed to ALL servers)${NC}"
        echo "$multi_server_stacks" | while IFS= read -r stack; do
            if [ -n "$stack" ]; then
                local stack_path="$DOCKER_DIR/$stack"
                local description=""
                case "$stack" in
                    "dozzle") description="# Docker log viewer" ;;
                    "dockge") description="# Docker compose management" ;;
                    "diun") description="# Docker image update notifier" ;;
                    *) description="# Multi-server tool" ;;
                esac

                if [ -d "$stack_path" ]; then
                    echo "  ✅ $stack $description"
                else
                    echo "  ❌ $stack $description (not found locally)"
                fi
            fi
        done
        echo ""
    fi

    for server_file in "$DEPLOYMENT_CONFIG_DIR/servers/"*.yml; do
        if [ -f "$server_file" ]; then
            local server=$(basename "$server_file" .yml)
            local role=$(grep "role:" "$server_file" | cut -d'"' -f2 2>/dev/null || echo "Unknown")

            echo -e "${GREEN}📍 $server${NC} (${YELLOW}$role${NC})"

            # Get stacks for this server
            local stacks=$(load_server_config "$server")
            if [ -n "$stacks" ]; then
                echo "$stacks" | while IFS= read -r stack; do
                    if [ -n "$stack" ]; then
                        local stack_path="$DOCKER_DIR/$stack"
                        if [ -d "$stack_path" ]; then
                            echo "  ✅ $stack (exists)"
                        else
                            echo "  ❌ $stack (missing)"
                        fi
                    fi
                done
            else
                echo "  ${YELLOW}No stacks configured${NC}"
            fi
            echo ""
        fi
    done

    # Show unassigned stacks
    echo -e "${YELLOW}📦 Unassigned Stacks${NC}"
    local unassigned_count=0
    if [ -d "$DOCKER_DIR" ]; then
        for stack_dir in "$DOCKER_DIR"/*; do
            if [ -d "$stack_dir" ]; then
                local stack_name=$(basename "$stack_dir")
                local assigned=false

                # Check if stack is assigned to any server
                for server_file in "$DEPLOYMENT_CONFIG_DIR/servers/"*.yml; do
                    if [ -f "$server_file" ]; then
                        if grep -q -- "- $stack_name" "$server_file" 2>/dev/null; then
                            assigned=true
                            break
                        fi
                    fi
                done

                # Also check if it's a multi-server stack
                local multi_server_stacks=$(load_multi_server_stacks)
                if echo "$multi_server_stacks" | grep -q "^$stack_name$" 2>/dev/null; then
                    assigned=true
                fi

                if [ "$assigned" = false ]; then
                    echo "  🔍 $stack_name"
                    unassigned_count=$((unassigned_count + 1))
                fi
            fi
        done
    fi

    if [ "$unassigned_count" -eq 0 ]; then
        echo -e "  ${GREEN}✅ All stacks are assigned to servers${NC}"
    fi
}

# Sync environment files to server
sync_env_files() {
    local server="$1"
    local dry_run="$2"

    echo -e "${YELLOW}Syncing environment files to $server...${NC}"

    # Get stacks for this server
    local stacks=$(load_server_config "$server")

    if [ -z "$stacks" ]; then
        echo -e "${YELLOW}No stacks configured for server $server${NC}"
        return 0
    fi

    # Create temporary directory for sync
    local temp_dir=$(mktemp -d)
    local sync_count=0

    echo "$stacks" | while IFS= read -r stack; do
        if [ -n "$stack" ]; then
            local stack_path="$DOCKER_DIR/$stack"

            if [ -d "$stack_path" ]; then
                # Find .env files in stack directory
                find "$stack_path" -name "*.env" -o -name ".env*" | while IFS= read -r env_file; do
                    if [ -n "$env_file" ]; then
                        local rel_path="${env_file#$DOCKER_DIR/}"
                        local dest_dir="$temp_dir/$(dirname "$rel_path")"

                        if [ "$dry_run" = "true" ]; then
                            echo -e "${BLUE}Would sync: $rel_path${NC}"
                        else
                            mkdir -p "$dest_dir"
                            cp "$env_file" "$dest_dir/"
                            echo -e "${GREEN}✓ Prepared: $rel_path${NC}"
                            sync_count=$((sync_count + 1))
                        fi
                    fi
                done

                # Also sync docker-compose.yml
                local compose_file="$stack_path/docker-compose.yml"
                if [ -f "$compose_file" ]; then
                    local rel_path="${compose_file#$DOCKER_DIR/}"
                    local dest_dir="$temp_dir/$(dirname "$rel_path")"

                    if [ "$dry_run" = "true" ]; then
                        echo -e "${BLUE}Would sync: $rel_path${NC}"
                    else
                        mkdir -p "$dest_dir"
                        cp "$compose_file" "$dest_dir/"
                        echo -e "${GREEN}✓ Prepared: $rel_path${NC}"
                    fi
                fi
            else
                echo -e "${YELLOW}Warning: Stack directory not found: $stack_path${NC}"
            fi
        fi
    done

    if [ "$dry_run" != "true" ]; then
        # Use rsync to sync to server (assumes SSH access)
        echo -e "${YELLOW}Transferring files to $server...${NC}"

        # This would be the actual rsync command (commented for safety)
        # rsync -avz --delete "$temp_dir/" "acedanger@$server:~/docker/"

        echo -e "${GREEN}Environment sync simulation completed for $server${NC}"
        echo -e "${BLUE}Files prepared in: $temp_dir${NC}"
        echo "To actually sync, you would run:"
        echo "  rsync -avz --delete '$temp_dir/' 'acedanger@$server:~/docker/'"

        # Clean up temp directory
        # rm -rf "$temp_dir"
    fi

    log "Environment sync completed for $server - $sync_count files prepared"
}
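# Caveat: because the `while` loop above reads from a pipe, it runs in a
# subshell, so increments to sync_count never reach the final log line, which
# will always report 0 files. A sketch that avoids the subshell by feeding the
# loop through process substitution (behaviour otherwise unchanged):
#   while IFS= read -r stack; do
#       ...
#   done < <(echo "$stacks")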

# Deploy stacks to server
deploy_to_server() {
    local server="$1"
    local dry_run="$2"
    local force="$3"

    echo -e "${YELLOW}Deploying Docker stacks to $server...${NC}"

    # First sync environment files
    sync_env_files "$server" "$dry_run"

    if [ "$dry_run" = "true" ]; then
        echo -e "${BLUE}Dry run completed for $server${NC}"
        return 0
    fi

    # Get stacks for this server
    local stacks=$(load_server_config "$server")

    if [ -z "$stacks" ]; then
        echo -e "${YELLOW}No stacks configured for server $server${NC}"
        return 0
    fi

    echo -e "${GREEN}Stacks to deploy on $server:${NC}"
    echo "$stacks" | sed 's/^/  - /'

    # Here you would implement the actual deployment logic
    # This could involve:
    # 1. SSH to the server
    # 2. Pull the latest compose files
    # 3. Run docker-compose up -d for each stack
    # 4. Perform health checks
    # 5. Send notifications

    echo -e "${GREEN}Deployment simulation completed for $server${NC}"

    # Send notification (using your existing ntfy setup)
    if command -v curl >/dev/null 2>&1; then
        curl -s \
            -H "priority:default" \
            -H "tags:deployment,docker,$server" \
            -d "Deployed Docker stacks to $server: $(echo "$stacks" | tr '\n' ', ' | sed 's/, $//')" \
            "https://notify.peterwood.rocks/lab" >/dev/null || true
    fi

    log "Deployment completed for $server"
}
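# A minimal sketch of what the real deployment step could look like once the
# simulation above is replaced — the SSH user, the ~/docker layout and the
# presence of docker-compose on the remote host are assumptions, not part of
# this commit:
#   echo "$stacks" | while IFS= read -r stack; do
#       ssh "acedanger@$server" "cd ~/docker/$stack && docker-compose pull && docker-compose up -d"
#   done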

# Deploy all stacks to their designated servers
deploy_all() {
    local dry_run="$1"

    echo -e "${BLUE}=== Deploying All Stacks to Designated Servers ===${NC}"

    for server_file in "$DEPLOYMENT_CONFIG_DIR/servers/"*.yml; do
        if [ -f "$server_file" ]; then
            local server=$(basename "$server_file" .yml)
            echo ""
            deploy_to_server "$server" "$dry_run"
        fi
    done

    echo ""
    echo -e "${GREEN}All deployments completed!${NC}"
}

# Check deployment status
check_status() {
    local server="$1"

    echo -e "${BLUE}=== Deployment Status for $server ===${NC}"

    # This would check the actual status on the server
    # For now, we'll simulate it
    echo -e "${GREEN}✅ Server is reachable${NC}"
    echo -e "${GREEN}✅ Docker is running${NC}"
    echo -e "${GREEN}✅ All stacks are healthy${NC}"

    # Get stacks for this server
    local stacks=$(load_server_config "$server")
    if [ -n "$stacks" ]; then
        echo ""
        echo -e "${BLUE}Configured stacks:${NC}"
        echo "$stacks" | while IFS= read -r stack; do
            if [ -n "$stack" ]; then
                echo -e "  ${GREEN}✅${NC} $stack"
            fi
        done
    fi
}

# Main function
main() {
    local command=""
    local dry_run=false
    local force=false
    local verbose=false
    local config_only=false
    local env_only=false

    # Parse command line arguments
    while [[ $# -gt 0 ]]; do
        case $1 in
            -h|--help)
                usage
                exit 0
                ;;
            -d|--dry-run)
                dry_run=true
                shift
                ;;
            -f|--force)
                force=true
                shift
                ;;
            -v|--verbose)
                verbose=true
                shift
                ;;
            --config-only)
                config_only=true
                shift
                ;;
            --env-only)
                env_only=true
                shift
                ;;
            init|map|deploy-all|status)
                command="$1"
                shift
                ;;
            deploy|sync-env|rollback)
                command="$1"
                if [[ $# -gt 1 && ! "$2" =~ ^- ]]; then
                    server="$2"
                    shift 2
                else
                    echo -e "${RED}Error: Command '$1' requires a server name${NC}"
                    exit 1
                fi
                ;;
            *)
                echo "Unknown option: $1"
                usage
                exit 1
                ;;
        esac
    done

    # Execute requested command
    case "$command" in
        init)
            init_deployment_config
            ;;
        map)
            show_mapping
            ;;
        deploy)
            deploy_to_server "$server" "$dry_run" "$force"
            ;;
        deploy-all)
            deploy_all "$dry_run"
            ;;
        sync-env)
            sync_env_files "$server" "$dry_run"
            ;;
        status)
            check_status "$server"
            ;;
        rollback)
            echo -e "${YELLOW}Rollback functionality not yet implemented${NC}"
            ;;
        "")
            echo -e "${RED}Error: No command specified${NC}"
            usage
            exit 1
            ;;
        *)
            echo -e "${RED}Error: Unknown command '$command'${NC}"
            usage
            exit 1
            ;;
    esac
}

# Run main function with all arguments
main "$@"

env-backup-integration.sh.sc2162_backup (new executable file, 181 lines)
@@ -0,0 +1,181 @@
#!/bin/bash

# env-backup-integration.sh - Integration script for adding .env backup to existing backup system
# Author: Shell Repository
# Description: Add .env backup functionality to existing backup scripts

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

echo -e "${BLUE}=== Environment Files Backup Integration ===${NC}"

# Function to add .env backup to a script
integrate_env_backup() {
    local target_script="$1"
    local integration_point="$2"

    if [ ! -f "$target_script" ]; then
        echo -e "${YELLOW}Target script not found: $target_script${NC}"
        return 1
    fi

    # Check if already integrated
    if grep -q "backup-env-files.sh" "$target_script"; then
        echo -e "${GREEN}✓ Already integrated with $target_script${NC}"
        return 0
    fi

    echo -e "${YELLOW}Integrating with $target_script...${NC}"

    # Create backup of original script
    cp "$target_script" "$target_script.backup"

    # Integration code
    local integration_code="
# === Environment Files Backup Integration ===
echo -e \"\${YELLOW}Backing up environment files...\${NC}\"
if [ -f \"\$SCRIPT_DIR/backup-env-files.sh\" ]; then
    if \"\$SCRIPT_DIR/backup-env-files.sh\"; then
        echo -e \"\${GREEN}✓ Environment files backed up successfully\${NC}\"
    else
        echo -e \"\${YELLOW}Warning: Environment files backup had issues\${NC}\"
    fi
else
    echo -e \"\${YELLOW}Warning: backup-env-files.sh not found\${NC}\"
fi

# Validate the backup
if [ -f \"\$SCRIPT_DIR/validate-env-backups.sh\" ]; then
    if \"\$SCRIPT_DIR/validate-env-backups.sh\" --summary-only; then
        echo -e \"\${GREEN}✓ Environment backup validation passed\${NC}\"
    else
        echo -e \"\${YELLOW}Warning: Environment backup validation failed\${NC}\"
    fi
fi
echo \"\"
# === End Environment Files Backup Integration ===
"

    # Add integration based on integration point
    case "$integration_point" in
        "after_docker")
            # Add after Docker backup section
            if grep -q "docker" "$target_script" || grep -q "backup.*docker" "$target_script"; then
                # Find a good insertion point after docker backup
                local line_num=$(grep -n -i "docker.*backup\|backup.*docker" "$target_script" | tail -1 | cut -d: -f1)
                if [ -n "$line_num" ]; then
                    sed -i "${line_num}a\\${integration_code}" "$target_script"
                    echo -e "${GREEN}✓ Integrated after Docker backup section${NC}"
                else
                    echo -e "${YELLOW}Could not find Docker backup section, adding at end${NC}"
                    echo "$integration_code" >> "$target_script"
                fi
            else
                echo -e "${YELLOW}No Docker backup section found, adding at end${NC}"
                echo "$integration_code" >> "$target_script"
            fi
            ;;
        "before_end")
            # Add before the end of the script
            local last_line=$(wc -l < "$target_script")
            sed -i "${last_line}i\\${integration_code}" "$target_script"
            echo -e "${GREEN}✓ Integrated before end of script${NC}"
            ;;
        "manual")
            echo -e "${BLUE}Manual integration code:${NC}"
            echo "$integration_code"
            echo -e "${YELLOW}Please add this code manually to your script at the appropriate location${NC}"
            ;;
        *)
            echo -e "${YELLOW}Unknown integration point, adding at end${NC}"
            echo "$integration_code" >> "$target_script"
            ;;
    esac

    echo -e "${GREEN}Integration completed. Backup saved as $target_script.backup${NC}"
}

# Find and integrate with existing backup scripts
echo -e "${YELLOW}Scanning for backup scripts to integrate with...${NC}"

# Common backup script patterns
declare -a backup_scripts=(
    "$SCRIPT_DIR/backup-docker.sh"
    "$SCRIPT_DIR/backup-media.sh"
    "$SCRIPT_DIR/update.sh"
    "$SCRIPT_DIR/backup.sh"
    "$SCRIPT_DIR/daily-backup.sh"
)

found_scripts=()

for script in "${backup_scripts[@]}"; do
    if [ -f "$script" ]; then
        found_scripts+=("$script")
        echo -e "${GREEN}Found: $(basename "$script")${NC}"
    fi
done

if [ ${#found_scripts[@]} -eq 0 ]; then
    echo -e "${YELLOW}No backup scripts found to integrate with${NC}"
    echo -e "${BLUE}You can manually add the .env backup to your backup routine:${NC}"
    echo ""
    echo "# Add to your backup script:"
    echo "$SCRIPT_DIR/backup-env-files.sh"
    echo "$SCRIPT_DIR/validate-env-backups.sh --summary-only"
    echo ""
else
    echo -e "${BLUE}Select scripts to integrate with (or 'all' for all, 'none' to skip):${NC}"
    for i in "${!found_scripts[@]}"; do
        echo "$((i+1)). $(basename "${found_scripts[$i]}")"
    done
    echo ""

    read -p "Enter your choice: " choice

    case "$choice" in
        "all")
            for script in "${found_scripts[@]}"; do
                integrate_env_backup "$script" "after_docker"
            done
            ;;
        "none")
            echo -e "${YELLOW}Skipping integration${NC}"
            ;;
        [0-9]*)
            if [ "$choice" -ge 1 ] && [ "$choice" -le ${#found_scripts[@]} ]; then
                script_index=$((choice-1))
                integrate_env_backup "${found_scripts[$script_index]}" "after_docker"
            else
                echo -e "${RED}Invalid choice${NC}"
            fi
            ;;
        *)
            echo -e "${RED}Invalid choice${NC}"
            ;;
    esac
fi

# Create a simple cron entry suggestion
echo -e "${BLUE}=== Automation Suggestions ===${NC}"
echo "Add to crontab for automated backups:"
echo ""
echo "# Daily .env backup at 2 AM"
echo "0 2 * * * $SCRIPT_DIR/backup-env-files.sh >/dev/null 2>&1"
echo ""
echo "# Weekly validation on Sundays at 3 AM"
echo "0 3 * * 0 $SCRIPT_DIR/validate-env-backups.sh --summary-only"
echo ""

echo -e "${GREEN}Integration setup completed!${NC}"
echo -e "${BLUE}Next steps:${NC}"
echo "1. Run: $SCRIPT_DIR/backup-env-files.sh --init"
echo "2. Create private repository in Gitea"
echo "3. Run first backup: $SCRIPT_DIR/backup-env-files.sh"
echo "4. Test restoration: $SCRIPT_DIR/backup-env-files.sh --restore"
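
The script above only prints the cron suggestions; a sketch of installing one of them non-interactively (the /path/to placeholder stands in for the real script location):

    ( crontab -l 2>/dev/null; echo "0 2 * * * /path/to/backup-env-files.sh >/dev/null 2>&1" ) | crontab -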

@@ -1,3 +1,5 @@
+#!/bin/bash
+
 # Check for wget, if not installed, install it
 (type -p wget >/dev/null || (sudo apt update && sudo apt-get install wget -y)) \
     && sudo mkdir -p -m 755 /etc/apt/keyrings

@@ -35,6 +35,7 @@ trap cleanup EXIT SIGINT SIGTERM
 ENV_FILE="$(dirname "$0")/../.env"
 if [ -f "$ENV_FILE" ]; then
     echo "Loading environment variables from $ENV_FILE"
+    # shellcheck source=/dev/null
     source "$ENV_FILE"
 else
     echo "Error: .env file not found in $(dirname "$0")/.."

@@ -133,7 +134,8 @@ send_notification() {
     local title="$1"
     local message="$2"
     local status="${3:-info}" # success, error, warning, info
-    local hostname=$(hostname)
+    local hostname
+    hostname=$(hostname)

     # Console notification
     log_message "$title: $message"
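
The declare-then-assign change above (repeated throughout these diffs) addresses ShellCheck SC2155: `local var=$(cmd)` always returns the exit status of `local`, so a failing command substitution goes unnoticed. Inside a function:

    local out=$(false); echo "$?"      # prints 0 — the failure is masked
    local out; out=$(false); echo "$?" # prints 1 — the failure is visible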

@@ -157,7 +159,8 @@ send_notification() {
 # Function to upload to Backblaze B2
 upload_to_b2() {
     local file_path="$1"
-    local filename=$(basename "$file_path")
+    local filename
+    filename=$(basename "$file_path")

     # Check if B2 is configured
     if [ -z "$B2_APPLICATION_KEY_ID" ] || [ -z "$B2_APPLICATION_KEY" ] || [ -z "$B2_BUCKET_NAME" ]; then

@@ -344,14 +347,11 @@ echo ""
 echo "=== PHASE 1: DATABASE BACKUP ==="
 log_message "Taking database backup using pg_dumpall as recommended by Immich documentation..."
 # Use pg_dumpall with recommended flags: --clean and --if-exists
-docker exec -t immich_postgres pg_dumpall \
+if ! docker exec -t immich_postgres pg_dumpall \
     --clean \
     --if-exists \
     --username="${DB_USERNAME}" \
-    > "${DB_BACKUP_PATH}"
-
-# Check if the dump was successful
-if [ $? -ne 0 ] || [ ! -s "${DB_BACKUP_PATH}" ]; then
+    > "${DB_BACKUP_PATH}" || [ ! -s "${DB_BACKUP_PATH}" ]; then
     log_message "Error: Database backup failed or created an empty file."
     exit 1
 fi
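
Folding the dump and the error check into a single `if ! ... || [ ! -s ... ]; then` also removes the fragile `$?` test (ShellCheck SC2181); the general shape of the pattern:

    if ! some_command > "$output_file" || [ ! -s "$output_file" ]; then
        echo "command failed or produced an empty file" >&2
        exit 1
    fi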

@@ -8,7 +8,6 @@ set -e
 # Colors for output
 RED='\033[0;31m'
 GREEN='\033[0;32m'
-YELLOW='\033[0;33m'
 BLUE='\033[0;34m'
 NC='\033[0m' # No Color

@@ -215,7 +214,8 @@ test_env_validation() {
     log_test "Environment Variable Validation"

     # Temporarily move .env file to test missing env
-    local env_file="$(dirname "$RESTORE_SCRIPT")/../.env"
+    local env_file
+    env_file="$(dirname "$RESTORE_SCRIPT")/../.env"
     if [ -f "$env_file" ]; then
         mv "$env_file" "${env_file}.backup"

@@ -285,11 +285,12 @@ test_logging() {
     TESTS_RUN=$((TESTS_RUN + 1))
     log_test "Logging Functionality"

-    local log_dir="$(dirname "$RESTORE_SCRIPT")/../logs"
+    local log_dir
+    log_dir="$(dirname "$RESTORE_SCRIPT")/../logs"
     local restore_log="${log_dir}/immich-restore.log"

     # Clear previous log entries
-    [ -f "$restore_log" ] && > "$restore_log"
+    [ -f "$restore_log" ] && true > "$restore_log"

     local mock_db_backup="${TEST_DIR}/mock_immich_db_backup_20250603_120000.sql.gz"
     local mock_uploads_backup="${TEST_DIR}/mock_immich_uploads_20250603_120000.tar.gz"

@@ -330,10 +331,12 @@ test_performance() {
     done
     tar -czf "$large_uploads_backup" -C "${TEST_DIR}" large_mock_uploads

-    local start_time=$(date +%s)
+    local start_time
+    start_time=$(date +%s)
     local output
     if output=$("$RESTORE_SCRIPT" --db-backup "$large_db_backup" --uploads-backup "$large_uploads_backup" --dry-run 2>&1); then
-        local end_time=$(date +%s)
+        local end_time
+        end_time=$(date +%s)
         local duration=$((end_time - start_time))

         if [ $duration -lt 30 ]; then # Should complete dry run in under 30 seconds

@@ -1,28 +1,19 @@
 #!/bin/bash

-if [ ! "$(expr substr $(uname -s) 1 5)" == 'Linux' ]; then
+uname_str=$(uname -s)
+if [ ! "${uname_str:0:5}" == 'Linux' ]; then
     exit 1
 fi

 source="/home/acedanger/backup/docker-data/"
 destination="/mnt/share/media/backups/docker-data/"

-command="rsync \
-    --archive --verbose --progress --dry-run --stats \
-    -e 'ssh' \
-    --include '*.gz' \
-    acedanger@ts-racknerd:$source $destination \
-    | fgrep 'Number of files' | cut -d' ' -f4 | tr -d ,"
-
-# echo 'Running command => '
-# echo $command
-
 num_files=$(rsync \
     --archive --verbose --progress --dry-run --stats \
     -e 'ssh' \
     --include '*.gz' \
-    acedanger@ts-racknerd:$source $destination \
-    | fgrep 'Number of files' | cut -d' ' -f4 | tr -d ,
+    acedanger@ts-racknerd:"$source" "$destination" \
+    | grep -F 'Number of files' | cut -d' ' -f4 | tr -d ,
 )

 # echo 'There are' "${num_files}" 'file(s) to be transferred.'
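
The `${uname_str:0:5}` form above uses bash's built-in substring expansion in place of the external `expr substr` call; for example:

    uname_str=$(uname -s)
    echo "${uname_str:0:5}"   # "Linux" on a Linux host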

@@ -43,6 +34,6 @@ rsync \
 # send a notification to https://notify.peterwood.rocks/lab
 curl \
     -H priority:default \
-    -H tags:backups,${HOSTNAME} \
+    -H tags:backups,"${HOSTNAME}" \
     -d "The backups have been moved to the NAS." \
     https://notify.peterwood.rocks/lab

@@ -57,12 +57,7 @@ BLUE='\033[0;34m'
 CYAN='\033[0;36m'
 NC='\033[0m' # No Color

-# Performance tracking variables
-SCRIPT_START_TIME=$(date +%s)
-BACKUP_START_TIME=""
-VERIFICATION_START_TIME=""
-SERVICE_STOP_TIME=""
-SERVICE_START_TIME=""
+# Performance tracking variables (removed unused variables)

 # Configuration
 MAX_BACKUP_AGE_DAYS=30

@@ -168,7 +163,8 @@ declare -A PLEX_FILES=(
 # Logging functions
 log_message() {
     local message="$1"
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
     echo -e "${CYAN}[${timestamp}]${NC} ${message}"
     mkdir -p "$LOCAL_LOG_ROOT"
     echo "[${timestamp}] $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true

@@ -176,7 +172,8 @@ log_message() {

 log_error() {
     local message="$1"
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
     echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}"
     mkdir -p "$LOCAL_LOG_ROOT"
     echo "[${timestamp}] ERROR: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true

@@ -184,7 +181,8 @@ log_error() {

 log_success() {
     local message="$1"
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
     echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}"
     mkdir -p "$LOCAL_LOG_ROOT"
     echo "[${timestamp}] SUCCESS: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true

@@ -192,7 +190,8 @@ log_success() {

 log_warning() {
     local message="$1"
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
     echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}"
     mkdir -p "$LOCAL_LOG_ROOT"
     echo "[${timestamp}] WARNING: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true

@@ -200,7 +199,8 @@ log_warning() {

 log_info() {
     local message="$1"
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+    local timestamp
+    timestamp=$(date '+%Y-%m-%d %H:%M:%S')
     echo -e "${BLUE}[${timestamp}] INFO:${NC} ${message}"
     mkdir -p "$LOCAL_LOG_ROOT"
     echo "[${timestamp}] INFO: $message" >> "${LOCAL_LOG_ROOT}/plex-backup-$(date '+%Y-%m-%d').log" 2>/dev/null || true

@@ -224,7 +224,8 @@ track_performance() {
     fi

     # Add performance entry
-    local entry=$(jq -n \
+    local entry
+    entry=$(jq -n \
         --arg operation "$operation" \
         --arg duration "$duration" \
         --arg timestamp "$(date -Iseconds)" \

@@ -251,7 +252,8 @@ initialize_logs() {

 # Log synchronization functions
 sync_logs_to_shared() {
-    local sync_start_time=$(date +%s)
+    local sync_start_time
+    sync_start_time=$(date +%s)
     log_info "Starting log synchronization to shared location"

     # Ensure shared log directory exists

@@ -272,7 +274,8 @@ sync_logs_to_shared() {

     for log_file in "$LOCAL_LOG_ROOT"/*.log "$LOCAL_LOG_ROOT"/*.json; do
         if [ -f "$log_file" ]; then
-            local filename=$(basename "$log_file")
+            local filename
+            filename=$(basename "$log_file")
             local shared_file="$SHARED_LOG_ROOT/$filename"

             # Only copy if file doesn't exist in shared location or local is newer

@@ -288,7 +291,8 @@ sync_logs_to_shared() {
         fi
     done

-    local sync_end_time=$(date +%s)
+    local sync_end_time
+    sync_end_time=$(date +%s)
     local sync_duration=$((sync_end_time - sync_start_time))

     if [ $error_count -eq 0 ]; then

@@ -302,7 +306,8 @@ sync_logs_to_shared() {

 # Cleanup old local logs (30 day retention)
 cleanup_old_local_logs() {
-    local cleanup_start_time=$(date +%s)
+    local cleanup_start_time
+    cleanup_start_time=$(date +%s)
     log_info "Starting cleanup of old local logs (30+ days)"

     if [ ! -d "$LOCAL_LOG_ROOT" ]; then

@@ -315,7 +320,8 @@ cleanup_old_local_logs() {

     # Find and remove log files older than 30 days
     while IFS= read -r -d '' old_file; do
-        local filename=$(basename "$old_file")
+        local filename
+        filename=$(basename "$old_file")
         if rm "$old_file" 2>/dev/null; then
             ((cleanup_count++))
             log_info "Removed old log: $filename"

@@ -327,12 +333,15 @@ cleanup_old_local_logs() {

     # Also clean up old performance log entries (keep structure, remove old entries)
     if [ -f "$PERFORMANCE_LOG_FILE" ]; then
-        local thirty_days_ago=$(date -d '30 days ago' -Iseconds)
+        local thirty_days_ago
+        thirty_days_ago=$(date -d '30 days ago' -Iseconds)
         local temp_perf_file="${PERFORMANCE_LOG_FILE}.cleanup.tmp"

         if jq --arg cutoff "$thirty_days_ago" '[.[] | select(.timestamp >= $cutoff)]' "$PERFORMANCE_LOG_FILE" > "$temp_perf_file" 2>/dev/null; then
-            local old_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
-            local new_count=$(jq length "$temp_perf_file" 2>/dev/null || echo "0")
+            local old_count
+            old_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+            local new_count
+            new_count=$(jq length "$temp_perf_file" 2>/dev/null || echo "0")
             local removed_count=$((old_count - new_count))

             if [ "$removed_count" -gt 0 ]; then

@@ -349,7 +358,8 @@ cleanup_old_local_logs() {
         fi
     fi

-    local cleanup_end_time=$(date +%s)
+    local cleanup_end_time
+    cleanup_end_time=$(date +%s)
     local cleanup_duration=$((cleanup_end_time - cleanup_start_time))

     if [ $cleanup_count -gt 0 ]; then

@@ -366,7 +376,8 @@ send_notification() {
     local title="$1"
     local message="$2"
     local status="${3:-info}" # success, error, warning, info
-    local hostname=$(hostname)
+    local hostname
+    hostname=$(hostname)

     # Console notification
     case "$status" in

@@ -412,16 +423,17 @@ format_backed_up_files() {
     local files=("$@")
     local count=${#files[@]}

-    if [ $count -eq 0 ]; then
+    if [ "$count" -eq 0 ]; then
         echo "no files"
-    elif [ $count -eq 1 ]; then
+    elif [ "$count" -eq 1 ]; then
         echo "${files[0]}"
-    elif [ $count -eq 2 ]; then
+    elif [ "$count" -eq 2 ]; then
         echo "${files[0]} and ${files[1]}"
     else
         local last_file="${files[-1]}"
         local other_files=("${files[@]:0:$((count-1))}")
-        local other_files_str=$(IFS=', '; echo "${other_files[*]}")
+        local other_files_str
+        other_files_str=$(IFS=', '; echo "${other_files[*]}")
         echo "${other_files_str}, and ${last_file}"
     fi
 }
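
Quoting "$count" in the numeric tests is defensive: with an empty variable the unquoted form expands to a malformed test, while the quoted form fails with a clear message. For instance:

    count=""
    [ $count -eq 0 ]     # error: unary operator expected
    [ "$count" -eq 0 ]   # error: integer expression expected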

@@ -431,17 +443,20 @@ calculate_checksum() {
     local file="$1"
     # Use /tmp for cache files to avoid permission issues
     local cache_dir="/tmp/plex-backup-cache"
-    local cache_file="$cache_dir/$(echo "$file" | sed 's|/|_|g').md5"
-    local file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
+    local cache_file="$cache_dir/${file//\//_}.md5"
+    local file_mtime
+    file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")

     # Create cache directory if it doesn't exist
     mkdir -p "$cache_dir" 2>/dev/null || true

     # Check if cached checksum exists and is newer than file
     if [ -f "$cache_file" ]; then
-        local cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
+        local cache_mtime
+        cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
         if [ "$cache_mtime" -gt "$file_mtime" ]; then
-            local cached_checksum=$(cat "$cache_file" 2>/dev/null)
+            local cached_checksum
+            cached_checksum=$(cat "$cache_file" 2>/dev/null)
             if [[ -n "$cached_checksum" && "$cached_checksum" =~ ^[a-f0-9]{32}$ ]]; then
                 echo "$cached_checksum"
                 return 0
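
The cache-file name now uses bash pattern substitution instead of piping through sed; `${file//\//_}` replaces every `/` in the path with `_` without spawning a subshell, e.g.:

    file="/var/lib/plex/db.sqlite"
    echo "${file//\//_}"   # _var_lib_plex_db.sqlite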

@@ -480,7 +495,8 @@ calculate_checksum() {
 # Check database integrity using Plex SQLite
 check_database_integrity() {
     local db_file="$1"
-    local db_name=$(basename "$db_file")
+    local db_name
+    db_name=$(basename "$db_file")

     log_message "Checking database integrity: $db_name"

@@ -518,10 +534,13 @@ check_database_integrity() {
 # Advanced database repair using <https://github.com/ChuckPa/DBRepair/> project methods
 repair_database() {
     local db_file="$1"
-    local db_name=$(basename "$db_file")
+    local db_name
+    db_name=$(basename "$db_file")
     local backup_file="${db_file}.pre-repair-backup"
-    local timestamp=$(date "+%Y-%m-%d_%H.%M.%S")
-    local db_dir=$(dirname "$db_file")
+    local timestamp
+    timestamp=$(date "+%Y-%m-%d_%H.%M.%S")
+    local db_dir
+    db_dir=$(dirname "$db_file")
     local temp_dir="${db_dir}/repair-temp-${timestamp}"

     log_message "Starting advanced database repair for: $db_name"

@@ -554,7 +573,7 @@ repair_database() {
     local new_db_file="${temp_dir}/${db_name}.new"

     log_message "Step 2: Dumping database to SQL..."
-    if sudo "$PLEX_SQLITE" "$db_file" ".dump" > "$dump_file" 2>/dev/null; then
+    if sudo "$PLEX_SQLITE" "$db_file" ".dump" | sudo tee "$dump_file" >/dev/null 2>&1; then
        log_success "Database dumped successfully"

        log_message "Step 3: Creating new database from dump..."
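
The switch to `sudo tee` matters because in `sudo cmd > file` the redirection is performed by the invoking (unprivileged) shell, so the dump fails wherever the target directory is root-owned; piping through `sudo tee` performs the write with elevated privileges. The same pattern in isolation:

    sudo "$PLEX_SQLITE" "$db_file" ".dump" | sudo tee "$dump_file" >/dev/null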
@@ -627,7 +646,8 @@ handle_wal_files() {
|
|||||||
)
|
)
|
||||||
|
|
||||||
for wal_file in "${wal_files[@]}"; do
|
for wal_file in "${wal_files[@]}"; do
|
||||||
local wal_basename=$(basename "$wal_file")
|
local wal_basename
|
||||||
|
wal_basename=$(basename "$wal_file")
|
||||||
|
|
||||||
case "$action" in
|
case "$action" in
|
||||||
"backup")
|
"backup")
|
||||||
@@ -670,7 +690,8 @@ handle_wal_files() {
|
|||||||
# Enhanced database integrity check with WAL handling
|
# Enhanced database integrity check with WAL handling
|
||||||
check_database_integrity_with_wal() {
|
check_database_integrity_with_wal() {
|
||||||
local db_file="$1"
|
local db_file="$1"
|
||||||
local db_name=$(basename "$db_file")
|
local db_name
|
||||||
|
db_name=$(basename "$db_file")
|
||||||
|
|
||||||
log_message "Checking database integrity with WAL handling: $db_name"
|
log_message "Checking database integrity with WAL handling: $db_name"
|
||||||
|
|
||||||
@@ -720,14 +741,16 @@ check_database_integrity_with_wal() {
|
|||||||
verify_files_parallel() {
|
verify_files_parallel() {
|
||||||
local backup_dir="$1"
|
local backup_dir="$1"
|
||||||
local -a pids=()
|
local -a pids=()
|
||||||
local temp_dir=$(mktemp -d)
|
local temp_dir
|
||||||
|
temp_dir=$(mktemp -d)
|
||||||
local verification_errors=0
|
local verification_errors=0
|
||||||
|
|
||||||
if [ "$PARALLEL_VERIFICATION" != true ]; then
|
if [ "$PARALLEL_VERIFICATION" != true ]; then
|
||||||
# Fall back to sequential verification
|
# Fall back to sequential verification
|
||||||
for nickname in "${!PLEX_FILES[@]}"; do
|
for nickname in "${!PLEX_FILES[@]}"; do
|
||||||
local src_file="${PLEX_FILES[$nickname]}"
|
local src_file="${PLEX_FILES[$nickname]}"
|
||||||
local dest_file="$backup_dir/$(basename "$src_file")"
|
local dest_file
|
||||||
|
dest_file="$backup_dir/$(basename "$src_file")"
|
||||||
|
|
||||||
if [ -f "$dest_file" ]; then
|
if [ -f "$dest_file" ]; then
|
||||||
if ! verify_backup "$src_file" "$dest_file"; then
|
if ! verify_backup "$src_file" "$dest_file"; then
|
||||||
@@ -743,7 +766,8 @@ verify_files_parallel() {
|
|||||||
# Start verification jobs in parallel
|
# Start verification jobs in parallel
|
||||||
for nickname in "${!PLEX_FILES[@]}"; do
|
for nickname in "${!PLEX_FILES[@]}"; do
|
||||||
local src_file="${PLEX_FILES[$nickname]}"
|
local src_file="${PLEX_FILES[$nickname]}"
|
||||||
local dest_file="$backup_dir/$(basename "$src_file")"
|
local dest_file
|
||||||
|
dest_file="$backup_dir/$(basename "$src_file")"
|
||||||
|
|
||||||
if [ -f "$dest_file" ]; then
|
if [ -f "$dest_file" ]; then
|
||||||
(
|
(
|
||||||
@@ -767,7 +791,8 @@ verify_files_parallel() {
|
|||||||
for nickname in "${!PLEX_FILES[@]}"; do
|
for nickname in "${!PLEX_FILES[@]}"; do
|
||||||
local result_file="$temp_dir/$nickname.result"
|
local result_file="$temp_dir/$nickname.result"
|
||||||
if [ -f "$result_file" ]; then
|
if [ -f "$result_file" ]; then
|
||||||
local result=$(cat "$result_file")
|
local result
|
||||||
|
result=$(cat "$result_file")
|
||||||
if [ "$result" != "0" ]; then
|
if [ "$result" != "0" ]; then
|
||||||
verification_errors=$((verification_errors + 1))
|
verification_errors=$((verification_errors + 1))
|
||||||
fi
|
fi
|
||||||
@@ -861,16 +886,13 @@ verify_backup() {
|
|||||||
# Enhanced service management with better monitoring
|
# Enhanced service management with better monitoring
|
||||||
manage_plex_service() {
|
manage_plex_service() {
|
||||||
local action="$1"
|
local action="$1"
|
||||||
local operation_start=$(date +%s)
|
local operation_start
|
||||||
|
operation_start=$(date +%s)
|
||||||
|
|
||||||
log_message "Managing Plex service: $action"
|
log_message "Managing Plex service: $action"
|
||||||
|
|
||||||
case "$action" in
|
case "$action" in
|
||||||
stop)
|
stop)
|
||||||
if [ "$action" == "stop" ]; then
|
|
||||||
SERVICE_STOP_TIME=$(date +%s)
|
|
||||||
fi
|
|
||||||
|
|
||||||
if sudo systemctl stop plexmediaserver.service; then
|
if sudo systemctl stop plexmediaserver.service; then
|
||||||
log_success "Plex service stopped"
|
log_success "Plex service stopped"
|
||||||
# Wait for clean shutdown with progress indicator
|
# Wait for clean shutdown with progress indicator
|
||||||
@@ -897,10 +919,6 @@ manage_plex_service() {
|
|||||||
fi
|
fi
|
||||||
;;
|
;;
|
||||||
start)
|
start)
|
||||||
if [ "$action" == "start" ]; then
|
|
||||||
SERVICE_START_TIME=$(date +%s)
|
|
||||||
fi
|
|
||||||
|
|
||||||
if sudo systemctl start plexmediaserver.service; then
|
if sudo systemctl start plexmediaserver.service; then
|
||||||
log_success "Plex service start command issued"
|
log_success "Plex service start command issued"
|
||||||
# Wait for service to be fully running with progress indicator
|
# Wait for service to be fully running with progress indicator
|
||||||
@@ -938,7 +956,8 @@ check_disk_space() {
|
|||||||
local backup_dir="$1"
|
local backup_dir="$1"
|
||||||
local required_space_mb="$2"
|
local required_space_mb="$2"
|
||||||
|
|
||||||
local available_space_kb=$(df "$backup_dir" | awk 'NR==2 {print $4}')
|
local available_space_kb
|
||||||
|
available_space_kb=$(df "$backup_dir" | awk 'NR==2 {print $4}')
|
||||||
local available_space_mb=$((available_space_kb / 1024))
|
local available_space_mb=$((available_space_kb / 1024))
|
||||||
|
|
||||||
if [ "$available_space_mb" -lt "$required_space_mb" ]; then
|
if [ "$available_space_mb" -lt "$required_space_mb" ]; then
|
||||||
@@ -957,7 +976,8 @@ estimate_backup_size() {
|
|||||||
for nickname in "${!PLEX_FILES[@]}"; do
|
for nickname in "${!PLEX_FILES[@]}"; do
|
||||||
local file="${PLEX_FILES[$nickname]}"
|
local file="${PLEX_FILES[$nickname]}"
|
||||||
if [ -f "$file" ]; then
|
if [ -f "$file" ]; then
|
||||||
local size_kb=$(du -k "$file" 2>/dev/null | cut -f1)
|
local size_kb
|
||||||
|
size_kb=$(du -k "$file" 2>/dev/null | cut -f1)
|
||||||
total_size=$((total_size + size_kb))
|
total_size=$((total_size + size_kb))
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
@@ -977,10 +997,14 @@ generate_performance_report() {
|
|||||||
jq -r '.[-10:] | .[] | " \(.operation): \(.duration_seconds)s (\(.timestamp))"' "$PERFORMANCE_LOG_FILE" 2>/dev/null || true
|
jq -r '.[-10:] | .[] | " \(.operation): \(.duration_seconds)s (\(.timestamp))"' "$PERFORMANCE_LOG_FILE" 2>/dev/null || true
|
||||||
|
|
||||||
# Calculate averages for common operations
|
# Calculate averages for common operations
|
||||||
local avg_backup=$(jq '[.[] | select(.operation == "backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
|
local avg_backup
|
||||||
local avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
|
avg_backup=$(jq '[.[] | select(.operation == "backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
|
||||||
local avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
|
local avg_verification
|
||||||
local avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
|
avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
|
||||||
|
local avg_service_stop
|
||||||
|
avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
|
||||||
|
local avg_service_start
|
||||||
|
avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
|
||||||
|
|
||||||
if [ "$avg_backup" != "0" ]; then
|
if [ "$avg_backup" != "0" ]; then
|
||||||
log_info "Average backup time: ${avg_backup}s"
|
log_info "Average backup time: ${avg_backup}s"
|
||||||
@@ -1004,7 +1028,8 @@ cleanup_old_backups() {
|
|||||||
find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
|
find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" -mtime +${MAX_BACKUP_AGE_DAYS} -delete 2>/dev/null || true
|
||||||
|
|
||||||
# Keep only MAX_BACKUPS_TO_KEEP most recent backups
|
# Keep only MAX_BACKUPS_TO_KEEP most recent backups
|
||||||
local backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
|
local backup_count
|
||||||
|
backup_count=$(find "${BACKUP_ROOT}" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
|
||||||
|
|
||||||
if [ "$backup_count" -gt "$MAX_BACKUPS_TO_KEEP" ]; then
|
if [ "$backup_count" -gt "$MAX_BACKUPS_TO_KEEP" ]; then
|
||||||
local excess_count=$((backup_count - MAX_BACKUPS_TO_KEEP))
|
local excess_count=$((backup_count - MAX_BACKUPS_TO_KEEP))
|
||||||
@@ -1103,7 +1128,8 @@ check_integrity_only() {
|
|||||||
|
|
||||||
# Main backup function
|
# Main backup function
|
||||||
main() {
|
main() {
|
||||||
local overall_start=$(date +%s)
|
local overall_start
|
||||||
|
overall_start=$(date +%s)
|
||||||
|
|
||||||
log_message "Starting enhanced Plex backup process at $(date)"
|
log_message "Starting enhanced Plex backup process at $(date)"
|
||||||
send_notification "Backup Started" "Plex backup process initiated" "info"
|
send_notification "Backup Started" "Plex backup process initiated" "info"
|
||||||
@@ -1118,11 +1144,13 @@ main() {
|
|||||||
# Check if only doing integrity check
|
# Check if only doing integrity check
|
||||||
if [ "$INTEGRITY_CHECK_ONLY" = true ]; then
|
if [ "$INTEGRITY_CHECK_ONLY" = true ]; then
|
||||||
check_integrity_only
|
check_integrity_only
|
||||||
|
# shellcheck disable=SC2317
|
||||||
return $?
|
return $?
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Estimate backup size
|
# Estimate backup size
|
||||||
local estimated_size_mb=$(estimate_backup_size)
|
local estimated_size_mb
|
||||||
|
estimated_size_mb=$(estimate_backup_size)
|
||||||
log_message "Estimated backup size: ${estimated_size_mb}MB"
|
log_message "Estimated backup size: ${estimated_size_mb}MB"
|
||||||
|
|
||||||
# Check disk space (require 2x estimated size for safety)
|
# Check disk space (require 2x estimated size for safety)
|
||||||
@@ -1217,7 +1245,8 @@ main() {
|
|||||||
handle_wal_files "backup" "$BACKUP_PATH"
|
handle_wal_files "backup" "$BACKUP_PATH"
|
||||||
|
|
||||||
# Backup files - always perform full backup
|
# Backup files - always perform full backup
|
||||||
local backup_start=$(date +%s)
|
local backup_start
|
||||||
|
backup_start=$(date +%s)
|
||||||
for nickname in "${!PLEX_FILES[@]}"; do
|
for nickname in "${!PLEX_FILES[@]}"; do
|
||||||
local file="${PLEX_FILES[$nickname]}"
|
local file="${PLEX_FILES[$nickname]}"
|
||||||
|
|
||||||
@@ -1225,7 +1254,8 @@ main() {
|
|||||||
log_message "Backing up: $(basename "$file")"
|
log_message "Backing up: $(basename "$file")"
|
||||||
|
|
||||||
# Create backup filename without timestamp (use original filename)
|
# Create backup filename without timestamp (use original filename)
|
||||||
local backup_file="${BACKUP_PATH}/$(basename "$file")"
|
local backup_file
|
||||||
|
backup_file="${BACKUP_PATH}/$(basename "$file")"
|
||||||
|
|
||||||
# Copy file
|
# Copy file
|
||||||
if sudo cp "$file" "$backup_file"; then
|
if sudo cp "$file" "$backup_file"; then
|
||||||
@@ -1269,14 +1299,17 @@ main() {
|
|||||||
log_error "Backup root directory is not writable: $BACKUP_ROOT"
|
log_error "Backup root directory is not writable: $BACKUP_ROOT"
|
||||||
backup_errors=$((backup_errors + 1))
|
backup_errors=$((backup_errors + 1))
|
||||||
else
|
else
|
||||||
local temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
|
local temp_archive
|
||||||
local final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
|
temp_archive="/tmp/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
|
||||||
|
local final_archive
|
||||||
|
final_archive="${BACKUP_ROOT}/plex-backup-$(date '+%Y%m%d_%H%M%S').tar.gz"
|
||||||
|
|
||||||
log_info "Temporary archive: $temp_archive"
|
log_info "Temporary archive: $temp_archive"
|
||||||
log_info "Final archive: $final_archive"
|
log_info "Final archive: $final_archive"
|
||||||
|
|
||||||
# Create archive in /tmp first, containing only the backed up files
|
# Create archive in /tmp first, containing only the backed up files
|
||||||
local temp_dir="/tmp/plex-backup-staging-$(date '+%Y%m%d_%H%M%S')"
|
local temp_dir
|
||||||
|
temp_dir="/tmp/plex-backup-staging-$(date '+%Y%m%d_%H%M%S')"
|
||||||
if ! mkdir -p "$temp_dir"; then
|
if ! mkdir -p "$temp_dir"; then
|
||||||
log_error "Failed to create staging directory: $temp_dir"
|
log_error "Failed to create staging directory: $temp_dir"
|
||||||
backup_errors=$((backup_errors + 1))
|
backup_errors=$((backup_errors + 1))
|
||||||
@@ -1287,7 +1320,8 @@ main() {
|
|||||||
local files_staged=0
|
local files_staged=0
|
||||||
for nickname in "${!PLEX_FILES[@]}"; do
|
for nickname in "${!PLEX_FILES[@]}"; do
|
||||||
local file="${PLEX_FILES[$nickname]}"
|
local file="${PLEX_FILES[$nickname]}"
|
||||||
local backup_file="${BACKUP_PATH}/$(basename "$file")"
|
local backup_file
|
||||||
|
backup_file="${BACKUP_PATH}/$(basename "$file")"
|
||||||
if [ -f "$backup_file" ]; then
|
if [ -f "$backup_file" ]; then
|
||||||
if cp "$backup_file" "$temp_dir/"; then
|
if cp "$backup_file" "$temp_dir/"; then
|
||||||
files_staged=$((files_staged + 1))
|
files_staged=$((files_staged + 1))
|
||||||
@@ -1309,9 +1343,11 @@ main() {
 log_info "Staged $files_staged files for archive creation"

 # Check disk space in /tmp
-local temp_available_kb=$(df /tmp | awk 'NR==2 {print $4}')
+local temp_available_kb
+temp_available_kb=$(df /tmp | awk 'NR==2 {print $4}')
 local temp_available_mb=$((temp_available_kb / 1024))
-local staging_size_mb=$(du -sm "$temp_dir" | cut -f1)
+local staging_size_mb
+staging_size_mb=$(du -sm "$temp_dir" | cut -f1)
 log_info "/tmp available space: ${temp_available_mb}MB, staging directory size: ${staging_size_mb}MB"

 # Check if we have enough space (require 3x staging size for compression)
@@ -1330,7 +1366,8 @@ main() {
 if [ $tar_exit_code -eq 0 ]; then
 # Verify archive was actually created and has reasonable size
 if [ -f "$temp_archive" ]; then
-local archive_size_mb=$(du -sm "$temp_archive" | cut -f1)
+local archive_size_mb
+archive_size_mb=$(du -sm "$temp_archive" | cut -f1)
 log_success "Archive created successfully: $(basename "$temp_archive") (${archive_size_mb}MB)"

 # Test archive integrity before moving
@@ -1345,7 +1382,8 @@ main() {
 rm -rf "$temp_dir"
 for nickname in "${!PLEX_FILES[@]}"; do
 local file="${PLEX_FILES[$nickname]}"
-local backup_file="${BACKUP_PATH}/$(basename "$file")"
+local backup_file
+backup_file="${BACKUP_PATH}/$(basename "$file")"
 rm -f "$backup_file" "$backup_file.md5"
 done
 else
@@ -1374,11 +1412,12 @@ main() {

 # Additional diagnostic information
 log_error "Staging directory contents:"
-ls -la "$temp_dir" 2>&1 | while IFS= read -r line; do
+find "$temp_dir" -ls 2>&1 | while IFS= read -r line; do
 log_error " $line"
 done

-local temp_usage=$(df -h /tmp | awk 'NR==2 {print "Used: " $3 "/" $2 " (" $5 ")"}')
+local temp_usage
+temp_usage=$(df -h /tmp | awk 'NR==2 {print "Used: " $3 "/" $2 " (" $5 ")"}')
 log_error "Temp filesystem status: $temp_usage"

 rm -rf "$temp_dir"
@@ -1390,7 +1429,8 @@ main() {
 fi

 # Send notification
-local files_list=$(format_backed_up_files "${backed_up_files[@]}")
+local files_list
+files_list=$(format_backed_up_files "${backed_up_files[@]}")
 send_notification "Backup Completed" "Successfully backed up $files_list" "success"
 else
 log_message "No files needed backup"
@@ -1426,7 +1466,8 @@ main() {
 exit 1
 else
 log_success "Enhanced backup completed successfully"
-local files_list=$(format_backed_up_files "${backed_up_files[@]}")
+local files_list
+files_list=$(format_backed_up_files "${backed_up_files[@]}")
 send_notification "Backup Success" "$files_list backed up successfully in ${total_time}s" "success"
 fi
 }
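The recurring change in the hunks above is the ShellCheck SC2155 fix: when `local` and a command substitution share one line, the exit status of the substituted command is discarded, because the status that `$?` (and `set -e`) sees is that of the `local` builtin, which is always 0. A minimal sketch of the difference, using an illustrative command rather than one lifted from the script:

#!/bin/bash
# SC2155 illustration: declaration combined with assignment hides the failure.
broken() {
    local size=$(stat -c%s /nonexistent 2>/dev/null)  # stat fails, but $? reports on 'local' (0)
    echo "combined declaration, status seen: $?"
}

fixed() {
    local size
    size=$(stat -c%s /nonexistent 2>/dev/null)        # $? now carries stat's non-zero status
    echo "separate assignment, status seen:  $?"
}

broken
fixed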
@@ -60,8 +60,6 @@ NC='\033[0m' # No Color

 # Configuration
 PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
-PLEX_USER="plex"
-PLEX_GROUP="plex"
 BACKUP_TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
 RECOVERY_LOG="/home/acedanger/shell/plex/logs/icu-recovery-${BACKUP_TIMESTAMP}.log"

@@ -72,7 +70,8 @@ mkdir -p "$(dirname "$RECOVERY_LOG")"
 log_message() {
 local level="$1"
 local message="$2"
-local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+local timestamp
+timestamp=$(date '+%Y-%m-%d %H:%M:%S')
 echo "[$timestamp] [$level] $message" | tee -a "$RECOVERY_LOG"
 }

@@ -115,7 +114,8 @@ verify_database_basic() {
 return 1
 fi

-local file_size=$(stat -c%s "$db_file" 2>/dev/null || stat -f%z "$db_file" 2>/dev/null)
+local file_size
+file_size=$(stat -c%s "$db_file" 2>/dev/null || stat -f%z "$db_file" 2>/dev/null)
 if [[ $file_size -lt 1024 ]]; then
 print_status "$RED" "$db_name: File is too small ($file_size bytes)"
 return 1
@@ -132,7 +132,8 @@ verify_database_basic() {
 print_status "$GREEN" "$db_name: Basic SQLite operations successful"

 # Count tables
-local table_count=$(sqlite3 "$db_file" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null || echo "0")
+local table_count
+table_count=$(sqlite3 "$db_file" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null || echo "0")
 print_status "$GREEN" "$db_name: Contains $table_count tables"

 return 0
@@ -262,12 +263,14 @@ check_database_sizes() {
 local blobs_db="${PLEX_DB_DIR}/com.plexapp.plugins.library.blobs.db"

 if [[ -f "$main_db" ]]; then
-local main_size=$(du -h "$main_db" | cut -f1)
+local main_size
+main_size=$(du -h "$main_db" | cut -f1)
 print_status "$GREEN" "Main database size: $main_size"
 fi

 if [[ -f "$blobs_db" ]]; then
-local blobs_size=$(du -h "$blobs_db" | cut -f1)
+local blobs_size
+blobs_size=$(du -h "$blobs_db" | cut -f1)
 print_status "$GREEN" "Blobs database size: $blobs_size"
 fi
 }
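The `stat -c%s ... || stat -f%z ...` fallback above exists because GNU and BSD stat spell the size query differently. A hedged sketch of how that idiom can be wrapped into a reusable check; the helper name and threshold are illustrative, not part of the script:

#!/bin/bash
# Hypothetical helper: portable file-size lookup with a minimum-size sanity check.
file_size_at_least() {
    local file="$1" min_bytes="$2"
    local size
    size=$(stat -c%s "$file" 2>/dev/null || stat -f%z "$file" 2>/dev/null) || return 1
    [ -n "$size" ] && [ "$size" -ge "$min_bytes" ]
}

# Example: flag a database file smaller than 1 KiB as suspicious.
file_size_at_least "/tmp/example.db" 1024 || echo "file is missing or too small" >&2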
@@ -75,30 +75,35 @@ declare -a FAILED_INTEGRATION_TESTS=()

 # Logging functions
 log_test() {
-local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+local timestamp
+timestamp=$(date '+%Y-%m-%d %H:%M:%S')
 echo -e "${CYAN}[INTEGRATION ${timestamp}]${NC} $1"
 }

 log_pass() {
-local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+local timestamp
+timestamp=$(date '+%Y-%m-%d %H:%M:%S')
 echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
 INTEGRATION_ASSERTIONS_PASSED=$((INTEGRATION_ASSERTIONS_PASSED + 1))
 }

 log_fail() {
-local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+local timestamp
+timestamp=$(date '+%Y-%m-%d %H:%M:%S')
 echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
 INTEGRATION_ASSERTIONS_FAILED=$((INTEGRATION_ASSERTIONS_FAILED + 1))
 FAILED_INTEGRATION_TESTS+=("$1")
 }

 log_info() {
-local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+local timestamp
+timestamp=$(date '+%Y-%m-%d %H:%M:%S')
 echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
 }

 log_warn() {
-local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+local timestamp
+timestamp=$(date '+%Y-%m-%d %H:%M:%S')
 echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
 }

@@ -209,13 +214,16 @@ test_performance_monitoring() {
 echo "[]" > "$test_perf_log"

 # Simulate performance tracking
-local start_time=$(date +%s)
+local start_time
+start_time=$(date +%s)
 sleep 1
-local end_time=$(date +%s)
+local end_time
+end_time=$(date +%s)
 local duration=$((end_time - start_time))

 # Create performance entry
-local entry=$(jq -n \
+local entry
+entry=$(jq -n \
 --arg operation "integration_test" \
 --arg duration "$duration" \
 --arg timestamp "$(date -Iseconds)" \
@@ -230,7 +238,8 @@ test_performance_monitoring() {
 mv "${test_perf_log}.tmp" "$test_perf_log"

 # Verify entry was added
-local entry_count=$(jq length "$test_perf_log")
+local entry_count
+entry_count=$(jq length "$test_perf_log")
 if [ "$entry_count" -eq 1 ]; then
 log_pass "Performance monitoring integration works"
 else
@@ -338,7 +347,8 @@ test_parallel_processing() {
 INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
 log_test "Parallel Processing Capabilities"

-local temp_dir=$(mktemp -d)
+local temp_dir
+temp_dir=$(mktemp -d)
 local -a pids=()
 local total_jobs=3
 local completed_jobs=0
@@ -347,7 +357,7 @@ test_parallel_processing() {
 for i in $(seq 1 $total_jobs); do
 (
 # Simulate parallel work
-sleep 0.$i
+sleep "0.$i"
 echo "Job $i completed" > "$temp_dir/job_$i.result"
 ) &
 pids+=($!)
@@ -361,7 +371,8 @@ test_parallel_processing() {
 done

 # Verify results
-local result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
+local result_files
+result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)

 # Cleanup
 rm -rf "$temp_dir"
@@ -386,15 +397,19 @@ test_checksum_caching() {
 echo "checksum test content" > "$test_file"

 # First checksum calculation (should create cache)
-local checksum1=$(md5sum "$test_file" | cut -d' ' -f1)
+local checksum1
+checksum1=$(md5sum "$test_file" | cut -d' ' -f1)
 echo "$checksum1" > "$cache_file"

 # Simulate cache check
-local file_mtime=$(stat -c %Y "$test_file")
-local cache_mtime=$(stat -c %Y "$cache_file")
+local file_mtime
+file_mtime=$(stat -c %Y "$test_file")
+local cache_mtime
+cache_mtime=$(stat -c %Y "$cache_file")

 if [ "$cache_mtime" -ge "$file_mtime" ]; then
-local cached_checksum=$(cat "$cache_file")
+local cached_checksum
+cached_checksum=$(cat "$cache_file")
 if [ "$cached_checksum" = "$checksum1" ]; then
 log_pass "Checksum caching system works correctly"
 else
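The checksum-caching test above trusts a cached `.md5` value only while the cache file's mtime is at least as new as the file it describes. A minimal sketch of that pattern on its own, assuming GNU stat (`-c %Y`); the function name is illustrative:

#!/bin/bash
# Reuse the cached checksum while it is still fresh; otherwise recompute and refresh it.
cached_md5() {
    local file="$1"
    local cache="${file}.md5"
    if [ -f "$cache" ] && [ "$(stat -c %Y "$cache")" -ge "$(stat -c %Y "$file")" ]; then
        cat "$cache"
        return 0
    fi
    md5sum "$file" | cut -d' ' -f1 | tee "$cache"
}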
526
plex/integration-test-plex.sh.sc2086_backup
Executable file
@@ -0,0 +1,526 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
################################################################################
|
||||||
|
# Plex Backup System Integration Test Suite
|
||||||
|
################################################################################
|
||||||
|
#
|
||||||
|
# Author: Peter Wood <peter@peterwood.dev>
|
||||||
|
# Description: End-to-end integration testing framework for the complete Plex
|
||||||
|
# backup ecosystem. Tests backup, restoration, validation, and
|
||||||
|
# monitoring systems in controlled environments without affecting
|
||||||
|
# production Plex installations.
|
||||||
|
#
|
||||||
|
# Features:
|
||||||
|
# - Full workflow integration testing
|
||||||
|
# - Isolated test environment creation
|
||||||
|
# - Production-safe testing procedures
|
||||||
|
# - Multi-scenario testing (normal, error, edge cases)
|
||||||
|
# - Performance benchmarking under load
|
||||||
|
# - Service integration validation
|
||||||
|
# - Cross-script compatibility testing
|
||||||
|
#
|
||||||
|
# Related Scripts:
|
||||||
|
# - backup-plex.sh: Primary backup system under test
|
||||||
|
# - restore-plex.sh: Restoration workflow testing
|
||||||
|
# - validate-plex-backups.sh: Validation system testing
|
||||||
|
# - monitor-plex-backup.sh: Monitoring integration
|
||||||
|
# - test-plex-backup.sh: Unit testing complement
|
||||||
|
# - plex.sh: Service management integration
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# ./integration-test-plex.sh # Full integration test suite
|
||||||
|
# ./integration-test-plex.sh --quick # Quick smoke tests
|
||||||
|
# ./integration-test-plex.sh --performance # Performance benchmarks
|
||||||
|
# ./integration-test-plex.sh --cleanup # Clean test artifacts
|
||||||
|
#
|
||||||
|
# Dependencies:
|
||||||
|
# - All Plex backup scripts in this directory
|
||||||
|
# - sqlite3 or Plex SQLite binary
|
||||||
|
# - Temporary filesystem space (for test environments)
|
||||||
|
# - systemctl (for service testing scenarios)
|
||||||
|
#
|
||||||
|
# Exit Codes:
|
||||||
|
# 0 - All integration tests passed
|
||||||
|
# 1 - General error
|
||||||
|
# 2 - Integration test failures
|
||||||
|
# 3 - Test environment setup failure
|
||||||
|
# 4 - Performance benchmarks failed
|
||||||
|
#
|
||||||
|
################################################################################
|
||||||
|
|
||||||
|
# Plex Backup Integration Test Suite
|
||||||
|
# This script tests the enhanced backup features in a controlled environment
|
||||||
|
# without affecting production Plex installation
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Color codes for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
CYAN='\033[0;36m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Test configuration
|
||||||
|
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
|
||||||
|
TEST_DIR="/tmp/plex-integration-test-$(date +%s)"
|
||||||
|
BACKUP_SCRIPT="$SCRIPT_DIR/backup-plex.sh"
|
||||||
|
|
||||||
|
# Test counters
|
||||||
|
INTEGRATION_TEST_FUNCTIONS=0
|
||||||
|
INTEGRATION_ASSERTIONS_PASSED=0
|
||||||
|
INTEGRATION_ASSERTIONS_FAILED=0
|
||||||
|
declare -a FAILED_INTEGRATION_TESTS=()
|
||||||
|
|
||||||
|
# Logging functions
|
||||||
|
log_test() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
echo -e "${CYAN}[INTEGRATION ${timestamp}]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_pass() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
|
||||||
|
INTEGRATION_ASSERTIONS_PASSED=$((INTEGRATION_ASSERTIONS_PASSED + 1))
|
||||||
|
}
|
||||||
|
|
||||||
|
log_fail() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
|
||||||
|
INTEGRATION_ASSERTIONS_FAILED=$((INTEGRATION_ASSERTIONS_FAILED + 1))
|
||||||
|
FAILED_INTEGRATION_TESTS+=("$1")
|
||||||
|
}
|
||||||
|
|
||||||
|
log_info() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_warn() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Setup integration test environment
|
||||||
|
setup_integration_environment() {
|
||||||
|
log_info "Setting up integration test environment"
|
||||||
|
|
||||||
|
# Create test directories
|
||||||
|
mkdir -p "$TEST_DIR"
|
||||||
|
mkdir -p "$TEST_DIR/mock_plex_data"
|
||||||
|
mkdir -p "$TEST_DIR/backup_destination"
|
||||||
|
mkdir -p "$TEST_DIR/logs"
|
||||||
|
|
||||||
|
# Create mock Plex database files with realistic content
|
||||||
|
create_mock_database "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db"
|
||||||
|
create_mock_database "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.blobs.db"
|
||||||
|
|
||||||
|
# Create mock Preferences.xml
|
||||||
|
create_mock_preferences "$TEST_DIR/mock_plex_data/Preferences.xml"
|
||||||
|
|
||||||
|
# Create mock WAL files to test WAL handling
|
||||||
|
echo "WAL data simulation" > "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db-wal"
|
||||||
|
echo "SHM data simulation" > "$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db-shm"
|
||||||
|
|
||||||
|
log_info "Integration test environment ready"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Create mock SQLite database for testing
|
||||||
|
create_mock_database() {
|
||||||
|
local db_file="$1"
|
||||||
|
|
||||||
|
# Create a proper SQLite database with some test data
|
||||||
|
sqlite3 "$db_file" << 'EOF'
|
||||||
|
CREATE TABLE library_sections (
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
name TEXT,
|
||||||
|
type INTEGER,
|
||||||
|
agent TEXT
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO library_sections (name, type, agent) VALUES
|
||||||
|
('Movies', 1, 'com.plexapp.agents.imdb'),
|
||||||
|
('TV Shows', 2, 'com.plexapp.agents.thetvdb'),
|
||||||
|
('Music', 8, 'com.plexapp.agents.lastfm');
|
||||||
|
|
||||||
|
CREATE TABLE metadata_items (
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
title TEXT,
|
||||||
|
year INTEGER,
|
||||||
|
added_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||||
|
);
|
||||||
|
|
||||||
|
INSERT INTO metadata_items (title, year) VALUES
|
||||||
|
('Test Movie', 2023),
|
||||||
|
('Another Movie', 2024),
|
||||||
|
('Test Show', 2022);
|
||||||
|
|
||||||
|
-- Add some indexes to make it more realistic
|
||||||
|
CREATE INDEX idx_metadata_title ON metadata_items(title);
|
||||||
|
CREATE INDEX idx_library_sections_type ON library_sections(type);
|
||||||
|
EOF
|
||||||
|
|
||||||
|
log_info "Created mock database: $(basename "$db_file")"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Create mock Preferences.xml
|
||||||
|
create_mock_preferences() {
|
||||||
|
local pref_file="$1"
|
||||||
|
|
||||||
|
cat > "$pref_file" << 'EOF'
|
||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<Preferences OldestPreviousVersion="1.32.8.7639-fb6452ebf" MachineIdentifier="test-machine-12345" ProcessedMachineIdentifier="test-processed-12345" AnonymousMachineIdentifier="test-anon-12345" FriendlyName="Test Plex Server" ManualPortMappingMode="1" TranscoderTempDirectory="/tmp" />
|
||||||
|
EOF
|
||||||
|
|
||||||
|
log_info "Created mock preferences file"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test command line argument parsing
|
||||||
|
test_command_line_parsing() {
|
||||||
|
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
|
||||||
|
log_test "Command Line Argument Parsing"
|
||||||
|
|
||||||
|
# Test help output
|
||||||
|
if "$BACKUP_SCRIPT" --help | grep -q "Usage:"; then
|
||||||
|
log_pass "Help output is functional"
|
||||||
|
else
|
||||||
|
log_fail "Help output test failed"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Test invalid argument handling
|
||||||
|
if ! "$BACKUP_SCRIPT" --invalid-option >/dev/null 2>&1; then
|
||||||
|
log_pass "Invalid argument handling works correctly"
|
||||||
|
else
|
||||||
|
log_fail "Invalid argument handling test failed"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test performance monitoring features
|
||||||
|
test_performance_monitoring() {
|
||||||
|
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
|
||||||
|
log_test "Performance Monitoring Features"
|
||||||
|
|
||||||
|
local test_perf_log="$TEST_DIR/test-performance.json"
|
||||||
|
|
||||||
|
# Initialize performance log
|
||||||
|
echo "[]" > "$test_perf_log"
|
||||||
|
|
||||||
|
# Simulate performance tracking
|
||||||
|
local start_time=$(date +%s)
|
||||||
|
sleep 1
|
||||||
|
local end_time=$(date +%s)
|
||||||
|
local duration=$((end_time - start_time))
|
||||||
|
|
||||||
|
# Create performance entry
|
||||||
|
local entry=$(jq -n \
|
||||||
|
--arg operation "integration_test" \
|
||||||
|
--arg duration "$duration" \
|
||||||
|
--arg timestamp "$(date -Iseconds)" \
|
||||||
|
'{
|
||||||
|
operation: $operation,
|
||||||
|
duration_seconds: ($duration | tonumber),
|
||||||
|
timestamp: $timestamp
|
||||||
|
}')
|
||||||
|
|
||||||
|
# Add to log
|
||||||
|
jq --argjson entry "$entry" '. += [$entry]' "$test_perf_log" > "${test_perf_log}.tmp" && \
|
||||||
|
mv "${test_perf_log}.tmp" "$test_perf_log"
|
||||||
|
|
||||||
|
# Verify entry was added
|
||||||
|
local entry_count=$(jq length "$test_perf_log")
|
||||||
|
if [ "$entry_count" -eq 1 ]; then
|
||||||
|
log_pass "Performance monitoring integration works"
|
||||||
|
else
|
||||||
|
log_fail "Performance monitoring integration failed"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test notification system with mock endpoints
|
||||||
|
test_notification_system() {
|
||||||
|
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
|
||||||
|
log_test "Notification System Integration"
|
||||||
|
|
||||||
|
# Test webhook notification (mock)
|
||||||
|
local webhook_test_log="$TEST_DIR/webhook_test.log"
|
||||||
|
|
||||||
|
# Mock webhook function
|
||||||
|
test_send_webhook() {
|
||||||
|
local url="$1"
|
||||||
|
local payload="$2"
|
||||||
|
|
||||||
|
# Simulate webhook call
|
||||||
|
echo "Webhook URL: $url" > "$webhook_test_log"
|
||||||
|
echo "Payload: $payload" >> "$webhook_test_log"
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test notification
|
||||||
|
if test_send_webhook "https://example.com/webhook" '{"test": "data"}'; then
|
||||||
|
if [ -f "$webhook_test_log" ] && grep -q "Webhook URL" "$webhook_test_log"; then
|
||||||
|
log_pass "Webhook notification integration works"
|
||||||
|
else
|
||||||
|
log_fail "Webhook notification integration failed"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_fail "Webhook notification test failed"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test backup validation system
|
||||||
|
test_backup_validation() {
|
||||||
|
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
|
||||||
|
log_test "Backup Validation System"
|
||||||
|
|
||||||
|
local test_backup_dir="$TEST_DIR/test_backup_20250525"
|
||||||
|
mkdir -p "$test_backup_dir"
|
||||||
|
|
||||||
|
# Create test backup files
|
||||||
|
cp "$TEST_DIR/mock_plex_data/"*.db "$test_backup_dir/"
|
||||||
|
cp "$TEST_DIR/mock_plex_data/Preferences.xml" "$test_backup_dir/"
|
||||||
|
|
||||||
|
# Test validation script
|
||||||
|
if [ -f "$SCRIPT_DIR/validate-plex-backups.sh" ]; then
|
||||||
|
# Mock the validation by checking file presence
|
||||||
|
local files_present=0
|
||||||
|
for file in com.plexapp.plugins.library.db com.plexapp.plugins.library.blobs.db Preferences.xml; do
|
||||||
|
if [ -f "$test_backup_dir/$file" ]; then
|
||||||
|
files_present=$((files_present + 1))
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ "$files_present" -eq 3 ]; then
|
||||||
|
log_pass "Backup validation system works"
|
||||||
|
else
|
||||||
|
log_fail "Backup validation system failed - missing files"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_warn "Validation script not found, skipping test"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test database integrity checking
|
||||||
|
test_database_integrity_checking() {
|
||||||
|
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
|
||||||
|
log_test "Database Integrity Checking"
|
||||||
|
|
||||||
|
# Test with good database
|
||||||
|
local test_db="$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db"
|
||||||
|
|
||||||
|
# Run integrity check using sqlite3 (since we can't use Plex SQLite in test)
|
||||||
|
if sqlite3 "$test_db" "PRAGMA integrity_check;" | grep -q "ok"; then
|
||||||
|
log_pass "Database integrity checking works for valid database"
|
||||||
|
else
|
||||||
|
log_fail "Database integrity checking failed for valid database"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Test with corrupted database
|
||||||
|
local corrupted_db="$TEST_DIR/corrupted.db"
|
||||||
|
echo "This is not a valid SQLite database" > "$corrupted_db"
|
||||||
|
|
||||||
|
if ! sqlite3 "$corrupted_db" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
|
||||||
|
log_pass "Database integrity checking correctly detects corruption"
|
||||||
|
else
|
||||||
|
log_fail "Database integrity checking failed to detect corruption"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test parallel processing capabilities
|
||||||
|
test_parallel_processing() {
|
||||||
|
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
|
||||||
|
log_test "Parallel Processing Capabilities"
|
||||||
|
|
||||||
|
local temp_dir=$(mktemp -d)
|
||||||
|
local -a pids=()
|
||||||
|
local total_jobs=3
|
||||||
|
local completed_jobs=0
|
||||||
|
|
||||||
|
# Start parallel jobs
|
||||||
|
for i in $(seq 1 $total_jobs); do
|
||||||
|
(
|
||||||
|
# Simulate parallel work
|
||||||
|
sleep 0.$i
|
||||||
|
echo "Job $i completed" > "$temp_dir/job_$i.result"
|
||||||
|
) &
|
||||||
|
pids+=($!)
|
||||||
|
done
|
||||||
|
|
||||||
|
# Wait for all jobs
|
||||||
|
for pid in "${pids[@]}"; do
|
||||||
|
if wait "$pid"; then
|
||||||
|
completed_jobs=$((completed_jobs + 1))
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Verify results
|
||||||
|
local result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
|
||||||
|
|
||||||
|
# Cleanup
|
||||||
|
rm -rf "$temp_dir"
|
||||||
|
|
||||||
|
if [ "$completed_jobs" -eq "$total_jobs" ] && [ "$result_files" -eq "$total_jobs" ]; then
|
||||||
|
log_pass "Parallel processing works correctly"
|
||||||
|
else
|
||||||
|
log_fail "Parallel processing test failed"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test checksum caching system
|
||||||
|
test_checksum_caching() {
|
||||||
|
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
|
||||||
|
log_test "Checksum Caching System"
|
||||||
|
|
||||||
|
local test_file="$TEST_DIR/checksum_test.txt"
|
||||||
|
local cache_file="${test_file}.md5"
|
||||||
|
|
||||||
|
# Create test file
|
||||||
|
echo "checksum test content" > "$test_file"
|
||||||
|
|
||||||
|
# First checksum calculation (should create cache)
|
||||||
|
local checksum1=$(md5sum "$test_file" | cut -d' ' -f1)
|
||||||
|
echo "$checksum1" > "$cache_file"
|
||||||
|
|
||||||
|
# Simulate cache check
|
||||||
|
local file_mtime=$(stat -c %Y "$test_file")
|
||||||
|
local cache_mtime=$(stat -c %Y "$cache_file")
|
||||||
|
|
||||||
|
if [ "$cache_mtime" -ge "$file_mtime" ]; then
|
||||||
|
local cached_checksum=$(cat "$cache_file")
|
||||||
|
if [ "$cached_checksum" = "$checksum1" ]; then
|
||||||
|
log_pass "Checksum caching system works correctly"
|
||||||
|
else
|
||||||
|
log_fail "Checksum caching system failed - checksum mismatch"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_fail "Checksum caching system failed - cache timing issue"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test WAL file handling
|
||||||
|
test_wal_file_handling() {
|
||||||
|
INTEGRATION_TEST_FUNCTIONS=$((INTEGRATION_TEST_FUNCTIONS + 1))
|
||||||
|
log_test "WAL File Handling"
|
||||||
|
|
||||||
|
local test_db="$TEST_DIR/mock_plex_data/com.plexapp.plugins.library.db"
|
||||||
|
local wal_file="${test_db}-wal"
|
||||||
|
local shm_file="${test_db}-shm"
|
||||||
|
|
||||||
|
# Verify WAL files exist
|
||||||
|
if [ -f "$wal_file" ] && [ -f "$shm_file" ]; then
|
||||||
|
# Test WAL checkpoint simulation
|
||||||
|
if sqlite3 "$test_db" "PRAGMA wal_checkpoint(FULL);" 2>/dev/null; then
|
||||||
|
log_pass "WAL file handling works correctly"
|
||||||
|
else
|
||||||
|
log_pass "WAL checkpoint simulation completed (mock environment)"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_pass "WAL file handling test completed (no WAL files in mock)"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Cleanup integration test environment
|
||||||
|
cleanup_integration_environment() {
|
||||||
|
if [ -d "$TEST_DIR" ]; then
|
||||||
|
log_info "Cleaning up integration test environment"
|
||||||
|
rm -rf "$TEST_DIR"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Generate integration test report
|
||||||
|
generate_integration_report() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
|
||||||
|
echo
|
||||||
|
echo "=================================================="
|
||||||
|
echo " PLEX BACKUP INTEGRATION TEST REPORT"
|
||||||
|
echo "=================================================="
|
||||||
|
echo "Test Run: $timestamp"
|
||||||
|
echo "Test Functions: $INTEGRATION_TEST_FUNCTIONS"
|
||||||
|
echo "Total Assertions: $((INTEGRATION_ASSERTIONS_PASSED + INTEGRATION_ASSERTIONS_FAILED))"
|
||||||
|
echo "Assertions Passed: $INTEGRATION_ASSERTIONS_PASSED"
|
||||||
|
echo "Assertions Failed: $INTEGRATION_ASSERTIONS_FAILED"
|
||||||
|
echo
|
||||||
|
|
||||||
|
if [ $INTEGRATION_ASSERTIONS_FAILED -gt 0 ]; then
|
||||||
|
echo "FAILED ASSERTIONS:"
|
||||||
|
for failed_test in "${FAILED_INTEGRATION_TESTS[@]}"; do
|
||||||
|
echo " - $failed_test"
|
||||||
|
done
|
||||||
|
echo
|
||||||
|
fi
|
||||||
|
|
||||||
|
local success_rate=0
|
||||||
|
local total_assertions=$((INTEGRATION_ASSERTIONS_PASSED + INTEGRATION_ASSERTIONS_FAILED))
|
||||||
|
if [ $total_assertions -gt 0 ]; then
|
||||||
|
success_rate=$(( (INTEGRATION_ASSERTIONS_PASSED * 100) / total_assertions ))
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Success Rate: ${success_rate}%"
|
||||||
|
echo
|
||||||
|
|
||||||
|
if [ $INTEGRATION_ASSERTIONS_FAILED -eq 0 ]; then
|
||||||
|
log_pass "All integration tests passed successfully!"
|
||||||
|
echo
|
||||||
|
echo "✅ The enhanced Plex backup system is ready for production use!"
|
||||||
|
echo
|
||||||
|
echo "Next Steps:"
|
||||||
|
echo " 1. Test with real webhook endpoints if using webhook notifications"
|
||||||
|
echo " 2. Test email notifications with configured sendmail"
|
||||||
|
echo " 3. Run a test backup in a non-production environment"
|
||||||
|
echo " 4. Set up automated backup scheduling with cron"
|
||||||
|
echo " 5. Monitor performance logs for optimization opportunities"
|
||||||
|
else
|
||||||
|
log_fail "Some integration tests failed - review output above"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main execution
|
||||||
|
main() {
|
||||||
|
log_info "Starting Plex Backup Integration Tests"
|
||||||
|
|
||||||
|
# Ensure backup script exists
|
||||||
|
if [ ! -f "$BACKUP_SCRIPT" ]; then
|
||||||
|
log_fail "Backup script not found: $BACKUP_SCRIPT"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Setup test environment
|
||||||
|
setup_integration_environment
|
||||||
|
|
||||||
|
# Trap cleanup on exit
|
||||||
|
trap cleanup_integration_environment EXIT SIGINT SIGTERM
|
||||||
|
|
||||||
|
# Run integration tests
|
||||||
|
test_command_line_parsing
|
||||||
|
test_performance_monitoring
|
||||||
|
test_notification_system
|
||||||
|
test_backup_validation
|
||||||
|
test_database_integrity_checking
|
||||||
|
test_parallel_processing
|
||||||
|
test_checksum_caching
|
||||||
|
test_wal_file_handling
|
||||||
|
|
||||||
|
# Generate report
|
||||||
|
generate_integration_report
|
||||||
|
|
||||||
|
# Return appropriate exit code
|
||||||
|
if [ $INTEGRATION_ASSERTIONS_FAILED -eq 0 ]; then
|
||||||
|
exit 0
|
||||||
|
else
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Run main function
|
||||||
|
main "$@"
|
||||||
@@ -207,12 +207,16 @@ check_backup_status() {
 log_status "OK" "Total backups: $backup_count"

 # Find latest backup
-local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
+local latest_backup
+latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
 if [ -n "$latest_backup" ]; then
-local backup_filename=$(basename "$latest_backup")
+local backup_filename
+backup_filename=$(basename "$latest_backup")
 # Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
-local backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
-local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Invalid date")
+local backup_date
+backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
+local readable_date
+readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Invalid date")
 local backup_age_days=$(( ($(date +%s) - $(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s 2>/dev/null || echo "0")) / 86400 ))

 if [ "$backup_age_days" -le 1 ]; then
@@ -224,11 +228,13 @@ check_backup_status() {
 fi

 # Check backup size
-local backup_size=$(du -sh "$latest_backup" 2>/dev/null | cut -f1)
+local backup_size
+backup_size=$(du -sh "$latest_backup" 2>/dev/null | cut -f1)
 log_status "INFO" "Latest backup size: $backup_size"

 # Check backup contents (via tar listing)
-local file_count=$(tar -tzf "$latest_backup" 2>/dev/null | wc -l)
+local file_count
+file_count=$(tar -tzf "$latest_backup" 2>/dev/null | wc -l)
 log_status "INFO" "Files in latest backup: $file_count"
 fi
 else
@@ -237,9 +243,12 @@ check_backup_status() {

 # Disk usage
 if [ -d "$BACKUP_ROOT" ]; then
-local total_backup_size=$(du -sh "$BACKUP_ROOT" 2>/dev/null | cut -f1)
-local available_space=$(df -h "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $4}')
-local used_percentage=$(df "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $5}' | sed 's/%//')
+local total_backup_size
+total_backup_size=$(du -sh "$BACKUP_ROOT" 2>/dev/null | cut -f1)
+local available_space
+available_space=$(df -h "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $4}')
+local used_percentage
+used_percentage=$(df "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $5}' | sed 's/%//')

 log_status "INFO" "Total backup storage: $total_backup_size"
 log_status "INFO" "Available space: $available_space"
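The `backup_age_days` arithmetic above converts the YYYYMMDD stamp embedded in the archive name into an age in days: both dates become epoch seconds and the difference is divided by 86400. A small worked example with a made-up filename, assuming GNU `date -d`:

#!/bin/bash
backup_filename="plex-backup-20250520_031500.tar.gz"                               # hypothetical name
backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')   # -> 20250520
iso_date="${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}"                # -> 2025-05-20
backup_age_days=$(( ($(date +%s) - $(date -d "$iso_date" +%s)) / 86400 ))
echo "latest backup is ${backup_age_days} day(s) old"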
@@ -267,15 +276,20 @@ show_performance_metrics() {
 log_status "OK" "Performance log found"

 # Recent operations
-local recent_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+local recent_count
+recent_count=$(jq length "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
 log_status "INFO" "Total logged operations: $recent_count"

 if [ "$recent_count" -gt 0 ]; then
 # Average times for different operations
-local avg_backup=$(jq '[.[] | select(.operation == "full_backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
-local avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
-local avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
-local avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+local avg_backup
+avg_backup=$(jq '[.[] | select(.operation == "full_backup") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+local avg_verification
+avg_verification=$(jq '[.[] | select(.operation == "verification") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+local avg_service_stop
+avg_service_stop=$(jq '[.[] | select(.operation == "service_stop") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")
+local avg_service_start
+avg_service_start=$(jq '[.[] | select(.operation == "service_start") | .duration_seconds] | if length > 0 then add/length else 0 end' "$PERFORMANCE_LOG_FILE" 2>/dev/null || echo "0")

 if [ "$avg_backup" != "0" ] && [ "$avg_backup" != "null" ]; then
 log_status "INFO" "Average backup time: ${avg_backup}s"
@@ -307,9 +321,11 @@ show_recent_activity() {
 echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

 # Check recent log files
-local recent_log=$(find_most_recent_log "plex-backup-*.log")
+local recent_log
+recent_log=$(find_most_recent_log "plex-backup-*.log")
 if [ -n "$recent_log" ]; then
-local log_date=$(basename "$recent_log" | sed 's/plex-backup-//' | sed 's/.log//')
+local log_date
+log_date=$(basename "$recent_log" | sed 's/plex-backup-//' | sed 's/.log//')
 local log_location=""
 if [[ "$recent_log" == "$LOCAL_LOG_ROOT"* ]]; then
 log_location=" (local)"
@@ -319,8 +335,10 @@ show_recent_activity() {
 log_status "INFO" "Most recent log: $log_date$log_location"

 # Check for errors in recent log
-local error_count=$(grep -c "ERROR:" "$recent_log" 2>/dev/null || echo "0")
-local warning_count=$(grep -c "WARNING:" "$recent_log" 2>/dev/null || echo "0")
+local error_count
+error_count=$(grep -c "ERROR:" "$recent_log" 2>/dev/null || echo "0")
+local warning_count
+warning_count=$(grep -c "WARNING:" "$recent_log" 2>/dev/null || echo "0")

 if [ "$error_count" -eq 0 ] && [ "$warning_count" -eq 0 ]; then
 log_status "OK" "No errors or warnings in recent log"
@@ -357,10 +375,12 @@ show_scheduling_status() {
 # Check systemd timers
 if systemctl list-timers --all 2>/dev/null | grep -q "plex-backup"; then
 log_status "OK" "Systemd timer configured"
-local timer_status=$(systemctl is-active plex-backup.timer 2>/dev/null || echo "inactive")
+local timer_status
+timer_status=$(systemctl is-active plex-backup.timer 2>/dev/null || echo "inactive")
 if [ "$timer_status" = "active" ]; then
 log_status "OK" "Timer is active"
-local next_run=$(systemctl list-timers plex-backup.timer 2>/dev/null | grep "plex-backup" | awk '{print $1, $2}')
+local next_run
+next_run=$(systemctl list-timers plex-backup.timer 2>/dev/null | grep "plex-backup" | awk '{print $1, $2}')
 if [ -n "$next_run" ]; then
 log_status "INFO" "Next run: $next_run"
 fi
@@ -383,11 +403,14 @@ show_recommendations() {

 # Check backup age
 if [ -d "$BACKUP_ROOT" ]; then
-local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
+local latest_backup
+latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
 if [ -n "$latest_backup" ]; then
-local backup_filename=$(basename "$latest_backup")
+local backup_filename
+backup_filename=$(basename "$latest_backup")
 # Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
-local backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
+local backup_date
+backup_date=$(echo "$backup_filename" | sed 's/plex-backup-//' | sed 's/_.*$//')
 local backup_age_days=$(( ($(date +%s) - $(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" +%s 2>/dev/null || echo "0")) / 86400 ))
 if [ "$backup_age_days" -gt 7 ]; then
 recommendations+=("Consider running a manual backup - latest backup is $backup_age_days days old")
@@ -408,7 +431,8 @@ show_recommendations() {

 # Check disk space
 if [ -d "$BACKUP_ROOT" ]; then
-local used_percentage=$(df "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $5}' | sed 's/%//')
+local used_percentage
+used_percentage=$(df "$BACKUP_ROOT" 2>/dev/null | awk 'NR==2 {print $5}' | sed 's/%//')
 if [ -n "$used_percentage" ] && [ "$used_percentage" -gt 85 ]; then
 recommendations+=("Backup disk usage is high ($used_percentage%) - consider cleaning old backups")
 fi
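The jq filters above compute a per-operation mean from the JSON performance log; the `if length > 0 then add/length else 0 end` tail guards against dividing by zero when no matching entries exist. A standalone illustration against a throwaway sample log (file name and values invented for the example):

#!/bin/bash
cat > /tmp/sample-performance.json << 'EOF'
[
  {"operation": "full_backup", "duration_seconds": 120},
  {"operation": "full_backup", "duration_seconds": 90},
  {"operation": "verification", "duration_seconds": 15}
]
EOF

# Average duration of "full_backup" entries; prints 105.
jq '[.[] | select(.operation == "full_backup") | .duration_seconds]
    | if length > 0 then add/length else 0 end' /tmp/sample-performance.json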
@@ -78,7 +78,8 @@ mkdir -p "$(dirname "$RECOVERY_LOG")"
 log_message() {
 local level="$1"
 local message="$2"
-local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
+local timestamp
+timestamp=$(date '+%Y-%m-%d %H:%M:%S')
 echo "[$timestamp] [$level] $message" | tee -a "$RECOVERY_LOG"
 }

@@ -180,7 +181,8 @@ find_best_backup() {
 # Find the most recent backup that exists and has reasonable size
 for backup_file in "${PLEX_DB_DIR}/${backup_type}"-????-??-??*; do
 if [[ -f "$backup_file" ]]; then
-local file_size=$(stat -f%z "$backup_file" 2>/dev/null || stat -c%s "$backup_file" 2>/dev/null)
+local file_size
+file_size=$(stat -f%z "$backup_file" 2>/dev/null || stat -c%s "$backup_file" 2>/dev/null)

 # Check if file size is reasonable (> 100MB for main DB, > 500MB for blobs)
 if [[ "$backup_type" == "com.plexapp.plugins.library.db" && $file_size -gt 104857600 ]] || \
@@ -198,7 +200,8 @@ restore_from_backup() {
 print_status "$YELLOW" "Finding and restoring from best available backups..."

 # Find best main database backup
-local main_backup=$(find_best_backup "com.plexapp.plugins.library.db")
+local main_backup
+main_backup=$(find_best_backup "com.plexapp.plugins.library.db")
 if [[ -n "$main_backup" ]]; then
 print_status "$GREEN" "Found main database backup: $(basename "$main_backup")"

@@ -219,7 +222,8 @@ restore_from_backup() {
 fi

 # Find best blobs database backup
-local blobs_backup=$(find_best_backup "com.plexapp.plugins.library.blobs.db")
+local blobs_backup
+blobs_backup=$(find_best_backup "com.plexapp.plugins.library.blobs.db")
 if [[ -n "$blobs_backup" ]]; then
 print_status "$GREEN" "Found blobs database backup: $(basename "$blobs_backup")"

@@ -275,7 +279,8 @@ fix_ownership() {
 local blobs_db="${PLEX_DB_DIR}/com.plexapp.plugins.library.blobs.db"

 if [[ -f "$main_db" ]]; then
-local main_owner=$(stat -f%Su:%Sg "$main_db" 2>/dev/null || stat -c%U:%G "$main_db" 2>/dev/null)
+local main_owner
+main_owner=$(stat -f%Su:%Sg "$main_db" 2>/dev/null || stat -c%U:%G "$main_db" 2>/dev/null)
 if [[ "$main_owner" == "$PLEX_USER:$PLEX_GROUP" ]]; then
 print_status "$GREEN" "Main database ownership: CORRECT ($main_owner)"
 else
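The backup-selection hunks above iterate over date-stamped copies of each database and only accept files above a size threshold (roughly 100 MB for the main library database, 500 MB for blobs). A reduced sketch of that idea, simplified from the script and with the threshold passed in rather than hard-coded:

#!/bin/bash
PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"

# Simplified sketch: return the newest date-stamped copy that clears the size threshold.
find_best_backup() {
    local backup_type="$1"   # e.g. com.plexapp.plugins.library.db
    local min_bytes="$2"     # e.g. 104857600 for ~100 MB
    local candidate size best=""
    for candidate in "${PLEX_DB_DIR}/${backup_type}"-????-??-??*; do
        [ -f "$candidate" ] || continue
        size=$(stat -c%s "$candidate" 2>/dev/null || stat -f%z "$candidate" 2>/dev/null)
        if [ -n "$size" ] && [ "$size" -gt "$min_bytes" ]; then
            best="$candidate"   # glob order is lexical, so the last match is the newest date
        fi
    done
    [ -n "$best" ] && echo "$best"
}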
@@ -66,7 +66,8 @@ readonly RESET='\033[0m'

 # 🔧 Configuration
 readonly PLEX_SERVICE="plexmediaserver"
-readonly SCRIPT_NAME="$(basename "$0")"
+readonly SCRIPT_NAME
+SCRIPT_NAME="$(basename "$0")"
 readonly PLEX_WEB_URL="http://localhost:32400/web"

 # 🎭 Unicode symbols for fancy output
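Splitting a `readonly` declaration is trickier than splitting a `local` one: once a variable has been marked read-only, any later assignment fails with a "readonly variable" error, so the assignment has to come first and the `readonly` marking second. A minimal sketch of the ordering that satisfies both bash and ShellCheck:

#!/bin/bash
# Assign first, then freeze; reversing these two lines makes the assignment fail.
SCRIPT_NAME="$(basename "$0")"
readonly SCRIPT_NAME
echo "running as: $SCRIPT_NAME"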
@@ -59,14 +59,12 @@ RED='\033[0;31m'
 GREEN='\033[0;32m'
 YELLOW='\033[1;33m'
 BLUE='\033[0;34m'
-CYAN='\033[0;36m'
 NC='\033[0m' # No Color

 # Configuration
 SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
 PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
 MAIN_DB="com.plexapp.plugins.library.db"
-BLOBS_DB="com.plexapp.plugins.library.blobs.db"
 PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"
 BACKUP_SUFFIX="recovery-$(date +%Y%m%d_%H%M%S)"
 RECOVERY_LOG="$SCRIPT_DIR/logs/database-recovery-$(date +%Y%m%d_%H%M%S).log"
@@ -80,7 +78,8 @@ mkdir -p "$SCRIPT_DIR/logs"

 # Logging function
 log_message() {
-local message="[$(date '+%Y-%m-%d %H:%M:%S')] $1"
+local message
+message="[$(date '+%Y-%m-%d %H:%M:%S')] $1"
 echo -e "$message"
 echo "$message" >> "$RECOVERY_LOG"
 }
@@ -216,7 +215,8 @@ start_plex_service() {
 # Check database integrity
 check_database_integrity() {
 local db_file="$1"
-local db_name=$(basename "$db_file")
+local db_name
+db_name=$(basename "$db_file")

 log_info "Checking integrity of $db_name..."

@@ -249,7 +249,8 @@ check_database_integrity() {
 # Recovery Method 1: SQLite .recover command
 recovery_method_sqlite_recover() {
 local db_file="$1"
-local db_name=$(basename "$db_file")
+local db_name
+db_name=$(basename "$db_file")
 local recovered_sql="${db_file}.recovered.sql"
 local new_db="${db_file}.recovered"

@@ -315,7 +316,8 @@ recovery_method_sqlite_recover() {
 # Recovery Method 2: Partial table extraction
 recovery_method_partial_extraction() {
 local db_file="$1"
-local db_name=$(basename "$db_file")
+local db_name
+db_name=$(basename "$db_file")
 local partial_sql="${db_file}.partial.sql"
 local new_db="${db_file}.partial"

@@ -336,7 +338,7 @@ recovery_method_partial_extraction() {
 } > "$partial_sql"

 # Extract schema
-if sudo "$PLEX_SQLITE" "$db_file" ".schema" >> "$partial_sql" 2>/dev/null; then
+if sudo "$PLEX_SQLITE" "$db_file" ".schema" | sudo tee -a "$partial_sql" >/dev/null 2>&1; then
 log_success "Schema extracted successfully"
 else
 log_warning "Schema extraction failed, trying alternative method"
@@ -372,7 +374,7 @@ recovery_method_partial_extraction() {
 local extract_success=false
 local limit=10000

-while [ $limit -le 100000 ] && [ "$extract_success" = false ]; do
+while [ "$limit" -le 100000 ] && [ "$extract_success" = false ]; do
 if sudo "$PLEX_SQLITE" "$db_file" "SELECT COUNT(*) FROM $table;" >/dev/null 2>&1; then
 # Table exists and is readable
 {
@@ -382,8 +384,9 @@ recovery_method_partial_extraction() {
 } >> "$partial_sql"

 if sudo "$PLEX_SQLITE" "$db_file" ".mode insert $table" >>/dev/null 2>&1 && \
-sudo "$PLEX_SQLITE" "$db_file" "SELECT * FROM $table LIMIT $limit;" >> "$partial_sql" 2>/dev/null; then
-local row_count=$(tail -n +3 "$partial_sql" | grep "INSERT INTO $table" | wc -l)
+sudo "$PLEX_SQLITE" "$db_file" "SELECT * FROM $table LIMIT $limit;" | sudo tee -a "$partial_sql" >/dev/null 2>&1; then
+local row_count
+row_count=$(tail -n +3 "$partial_sql" | grep -c "INSERT INTO $table")
 log_success "Extracted $row_count rows from $table"
 extract_success=true
 else
@@ -444,7 +447,8 @@ recovery_method_partial_extraction() {
 # Recovery Method 3: Emergency data extraction
 recovery_method_emergency_extraction() {
 local db_file="$1"
-local db_name=$(basename "$db_file")
+local db_name
+db_name=$(basename "$db_file")

 log_info "Recovery Method 3: Emergency data extraction for $db_name"

@@ -544,7 +548,8 @@ recovery_method_backup_restore() {
 fi

 # Find most recent backup
-local latest_backup=$(find "$backup_dir" -maxdepth 1 -name "plex-backup-*.tar.gz" -type f 2>/dev/null | sort -r | head -1)
+local latest_backup
+latest_backup=$(find "$backup_dir" -maxdepth 1 -name "plex-backup-*.tar.gz" -type f 2>/dev/null | sort -r | head -1)

 if [ -z "$latest_backup" ]; then
 log_error "No backup files found in $backup_dir"
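The switch from `sudo ... >> "$partial_sql"` to piping through `sudo tee -a` in the extraction hunks addresses a classic redirection pitfall: the `>>` is opened by the calling shell, not by the command run under sudo, so it fails when the target file is writable only by root. A short illustration with placeholder paths:

#!/bin/bash
# Fails with "Permission denied" if /root/partial.sql is not writable by the caller,
# because the append redirection is set up before sudo ever runs:
#   sudo sqlite3 /path/to/db ".schema" >> /root/partial.sql
#
# Piping into "sudo tee -a" performs the append inside a root-owned process instead:
sudo sqlite3 /path/to/db ".schema" | sudo tee -a /root/partial.sql >/dev/null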
701
plex/recover-plex-database.sh.sc2086_backup
Executable file
@@ -0,0 +1,701 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
################################################################################
|
||||||
|
# Advanced Plex Database Recovery Script
|
||||||
|
################################################################################
|
||||||
|
#
|
||||||
|
# Author: Peter Wood <peter@peterwood.dev>
|
||||||
|
# Description: Advanced database recovery script with multiple repair strategies
|
||||||
|
# for corrupted Plex databases. Implements progressive recovery
|
||||||
|
# techniques from gentle repairs to aggressive reconstruction
|
||||||
|
# methods, with comprehensive logging and rollback capabilities.
|
||||||
|
#
|
||||||
|
# Features:
|
||||||
|
# - Progressive recovery strategy (gentle to aggressive)
|
||||||
|
# - Multiple repair techniques (VACUUM, dump/restore, rebuild)
|
||||||
|
# - Automatic backup before any recovery attempts
|
||||||
|
# - Database integrity verification at each step
|
||||||
|
# - Rollback capability if recovery fails
|
||||||
|
# - Dry-run mode for safe testing
|
||||||
|
# - Comprehensive logging and reporting
|
||||||
|
#
|
||||||
|
# Related Scripts:
|
||||||
|
# - backup-plex.sh: Creates backups for recovery scenarios
|
||||||
|
# - icu-aware-recovery.sh: ICU-specific recovery methods
|
||||||
|
# - nuclear-plex-recovery.sh: Last-resort complete replacement
|
||||||
|
# - validate-plex-recovery.sh: Validates recovery results
|
||||||
|
# - restore-plex.sh: Standard restoration from backups
|
||||||
|
# - plex.sh: General Plex service management
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# ./recover-plex-database.sh # Interactive recovery
|
||||||
|
# ./recover-plex-database.sh --auto # Automated recovery
|
||||||
|
# ./recover-plex-database.sh --dry-run # Show recovery plan
|
||||||
|
# ./recover-plex-database.sh --gentle # Gentle repair only
|
||||||
|
# ./recover-plex-database.sh --aggressive # Aggressive repair methods
|
||||||
|
#
|
||||||
|
# Dependencies:
|
||||||
|
# - sqlite3 or Plex SQLite binary
|
||||||
|
# - systemctl (for service management)
|
||||||
|
# - Sufficient disk space for backups and temp files
|
||||||
|
#
|
||||||
|
# Exit Codes:
|
||||||
|
# 0 - Recovery successful
|
||||||
|
# 1 - General error
|
||||||
|
# 2 - Database corruption beyond repair
|
||||||
|
# 3 - Service management failure
|
||||||
|
# 4 - Insufficient disk space
|
||||||
|
# 5 - Recovery partially successful (manual intervention needed)
|
||||||
|
#
|
||||||
|
################################################################################
|
||||||
|
|
||||||
|
# Advanced Plex Database Recovery Script
|
||||||
|
# Usage: ./recover-plex-database.sh [--auto] [--dry-run]
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Color codes for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
CYAN='\033[0;36m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
|
||||||
|
PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"
|
||||||
|
MAIN_DB="com.plexapp.plugins.library.db"
|
||||||
|
BLOBS_DB="com.plexapp.plugins.library.blobs.db"
|
||||||
|
PLEX_SQLITE="/usr/lib/plexmediaserver/Plex SQLite"
|
||||||
|
BACKUP_SUFFIX="recovery-$(date +%Y%m%d_%H%M%S)"
|
||||||
|
RECOVERY_LOG="$SCRIPT_DIR/logs/database-recovery-$(date +%Y%m%d_%H%M%S).log"
|
||||||
|
|
||||||
|
# Script options
|
||||||
|
AUTO_MODE=false
|
||||||
|
DRY_RUN=false
|
||||||
|
|
||||||
|
# Ensure logs directory exists
|
||||||
|
mkdir -p "$SCRIPT_DIR/logs"
|
||||||
|
|
||||||
|
# Logging function
|
||||||
|
log_message() {
|
||||||
|
local message="[$(date '+%Y-%m-%d %H:%M:%S')] $1"
|
||||||
|
echo -e "$message"
|
||||||
|
echo "$message" >> "$RECOVERY_LOG"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_success() {
|
||||||
|
log_message "${GREEN}SUCCESS: $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_error() {
|
||||||
|
log_message "${RED}ERROR: $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_warning() {
|
||||||
|
log_message "${YELLOW}WARNING: $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_info() {
|
||||||
|
log_message "${BLUE}INFO: $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Parse command line arguments
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case $1 in
|
||||||
|
--auto)
|
||||||
|
AUTO_MODE=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
--dry-run)
|
||||||
|
DRY_RUN=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-h|--help)
|
||||||
|
echo "Usage: $0 [--auto] [--dry-run] [--help]"
|
||||||
|
echo ""
|
||||||
|
echo "Options:"
|
||||||
|
echo " --auto Automatically attempt all recovery methods without prompts"
|
||||||
|
echo " --dry-run Show what would be done without making changes"
|
||||||
|
echo " --help Show this help message"
|
||||||
|
echo ""
|
||||||
|
echo "Recovery Methods (in order):"
|
||||||
|
echo " 1. SQLite .recover command (modern SQLite recovery)"
|
||||||
|
echo " 2. Partial table extraction with LIMIT"
|
||||||
|
echo " 3. Emergency data extraction"
|
||||||
|
echo " 4. Backup restoration from most recent good backup"
|
||||||
|
echo ""
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
log_error "Unknown option: $1"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
|
# Check dependencies
|
||||||
|
check_dependencies() {
|
||||||
|
log_info "Checking dependencies..."
|
||||||
|
|
||||||
|
if [ ! -f "$PLEX_SQLITE" ]; then
|
||||||
|
log_error "Plex SQLite binary not found at: $PLEX_SQLITE"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! command -v sqlite3 >/dev/null 2>&1; then
|
||||||
|
log_error "Standard sqlite3 command not found"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Make Plex SQLite executable
|
||||||
|
sudo chmod +x "$PLEX_SQLITE" 2>/dev/null || true
|
||||||
|
|
||||||
|
log_success "Dependencies check passed"
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Stop Plex service safely
|
||||||
|
stop_plex_service() {
|
||||||
|
log_info "Stopping Plex Media Server..."
|
||||||
|
|
||||||
|
if [ "$DRY_RUN" = true ]; then
|
||||||
|
log_info "DRY RUN: Would stop Plex service"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
if sudo systemctl is-active --quiet plexmediaserver; then
|
||||||
|
sudo systemctl stop plexmediaserver
|
||||||
|
|
||||||
|
# Wait for service to fully stop
|
||||||
|
local timeout=30
|
||||||
|
while sudo systemctl is-active --quiet plexmediaserver && [ $timeout -gt 0 ]; do
|
||||||
|
sleep 1
|
||||||
|
timeout=$((timeout - 1))
|
||||||
|
done
|
||||||
|
|
||||||
|
if sudo systemctl is-active --quiet plexmediaserver; then
|
||||||
|
log_error "Failed to stop Plex service within timeout"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_success "Plex service stopped successfully"
|
||||||
|
else
|
||||||
|
log_info "Plex service was already stopped"
|
||||||
|
fi
|
||||||
|
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# Start Plex service
|
||||||
|
start_plex_service() {
|
||||||
|
log_info "Starting Plex Media Server..."
|
||||||
|
|
||||||
|
if [ "$DRY_RUN" = true ]; then
|
||||||
|
log_info "DRY RUN: Would start Plex service"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
sudo systemctl start plexmediaserver
|
||||||
|
|
||||||
|
# Wait for service to start
|
||||||
|
local timeout=30
|
||||||
|
while ! sudo systemctl is-active --quiet plexmediaserver && [ $timeout -gt 0 ]; do
|
||||||
|
sleep 1
|
||||||
|
timeout=$((timeout - 1))
|
||||||
|
done
|
||||||
|
|
||||||
|
if sudo systemctl is-active --quiet plexmediaserver; then
|
||||||
|
log_success "Plex service started successfully"
|
||||||
|
else
|
||||||
|
log_warning "Plex service may not have started properly"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check database integrity
|
||||||
|
check_database_integrity() {
|
||||||
|
local db_file="$1"
|
||||||
|
local db_name=$(basename "$db_file")
|
||||||
|
|
||||||
|
log_info "Checking integrity of $db_name..."
|
||||||
|
|
||||||
|
if [ ! -f "$db_file" ]; then
|
||||||
|
log_error "Database file not found: $db_file"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
local integrity_result
|
||||||
|
integrity_result=$(sudo "$PLEX_SQLITE" "$db_file" "PRAGMA integrity_check;" 2>&1)
|
||||||
|
local check_exit_code=$?
|
||||||
|
|
||||||
|
if [ $check_exit_code -ne 0 ]; then
|
||||||
|
log_error "Failed to run integrity check on $db_name"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if echo "$integrity_result" | grep -q "^ok$"; then
|
||||||
|
log_success "Database integrity check passed: $db_name"
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
log_warning "Database integrity issues detected in $db_name:"
|
||||||
|
echo "$integrity_result" | while IFS= read -r line; do
|
||||||
|
log_warning " $line"
|
||||||
|
done
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Recovery Method 1: SQLite .recover command
|
||||||
|
recovery_method_sqlite_recover() {
|
||||||
|
local db_file="$1"
|
||||||
|
local db_name=$(basename "$db_file")
|
||||||
|
local recovered_sql="${db_file}.recovered.sql"
|
||||||
|
local new_db="${db_file}.recovered"
|
||||||
|
|
||||||
|
log_info "Recovery Method 1: SQLite .recover command for $db_name"
|
||||||
|
|
||||||
|
if [ "$DRY_RUN" = true ]; then
|
||||||
|
log_info "DRY RUN: Would attempt SQLite .recover method"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if .recover is available (SQLite 3.37.0+)
|
||||||
|
if ! echo ".help" | sqlite3 2>/dev/null | grep -q "\.recover"; then
|
||||||
|
log_warning "SQLite .recover command not available in this version"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_info "Attempting SQLite .recover method..."
|
||||||
|
|
||||||
|
# Use standard sqlite3 for .recover as it's more reliable
|
||||||
|
if sqlite3 "$db_file" ".recover" > "$recovered_sql" 2>/dev/null; then
|
||||||
|
log_success "Recovery SQL generated successfully"
|
||||||
|
|
||||||
|
# Create new database from recovered data
|
||||||
|
if [ -f "$recovered_sql" ] && [ -s "$recovered_sql" ]; then
|
||||||
|
if sqlite3 "$new_db" < "$recovered_sql" 2>/dev/null; then
|
||||||
|
log_success "New database created from recovered data"
|
||||||
|
|
||||||
|
# Verify new database integrity
|
||||||
|
if sqlite3 "$new_db" "PRAGMA integrity_check;" | grep -q "ok"; then
|
||||||
|
log_success "Recovered database integrity verified"
|
||||||
|
|
||||||
|
# Replace original with recovered database
|
||||||
|
if sudo mv "$db_file" "${db_file}.corrupted" && sudo mv "$new_db" "$db_file"; then
|
||||||
|
sudo chown plex:plex "$db_file"
|
||||||
|
sudo chmod 644 "$db_file"
|
||||||
|
log_success "Database successfully recovered using .recover method"
|
||||||
|
|
||||||
|
# Clean up
|
||||||
|
rm -f "$recovered_sql"
|
||||||
|
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
log_error "Failed to replace original database"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "Recovered database failed integrity check"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "Failed to create database from recovered SQL"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "Recovery SQL file is empty or not generated"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "SQLite .recover command failed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Clean up on failure
|
||||||
|
rm -f "$recovered_sql" "$new_db"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Recovery Method 2: Partial table extraction
|
||||||
|
recovery_method_partial_extraction() {
|
||||||
|
local db_file="$1"
|
||||||
|
local db_name=$(basename "$db_file")
|
||||||
|
local partial_sql="${db_file}.partial.sql"
|
||||||
|
local new_db="${db_file}.partial"
|
||||||
|
|
||||||
|
log_info "Recovery Method 2: Partial table extraction for $db_name"
|
||||||
|
|
||||||
|
if [ "$DRY_RUN" = true ]; then
|
||||||
|
log_info "DRY RUN: Would attempt partial extraction method"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_info "Extracting schema and partial data..."
|
||||||
|
|
||||||
|
# Start the SQL file with schema
|
||||||
|
{
|
||||||
|
echo "-- Partial recovery of $db_name"
|
||||||
|
echo "-- Generated on $(date)"
|
||||||
|
echo ""
|
||||||
|
} > "$partial_sql"
|
||||||
|
|
||||||
|
# Extract schema
|
||||||
|
if sudo "$PLEX_SQLITE" "$db_file" ".schema" >> "$partial_sql" 2>/dev/null; then
|
||||||
|
log_success "Schema extracted successfully"
|
||||||
|
else
|
||||||
|
log_warning "Schema extraction failed, trying alternative method"
|
||||||
|
# Try with standard sqlite3
|
||||||
|
if sqlite3 "$db_file" ".schema" >> "$partial_sql" 2>/dev/null; then
|
||||||
|
log_success "Schema extracted with standard sqlite3"
|
||||||
|
else
|
||||||
|
log_error "Schema extraction failed completely"
|
||||||
|
rm -f "$partial_sql"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Critical tables to extract (in order of importance)
|
||||||
|
local critical_tables=(
|
||||||
|
"accounts"
|
||||||
|
"library_sections"
|
||||||
|
"directories"
|
||||||
|
"metadata_items"
|
||||||
|
"media_items"
|
||||||
|
"media_parts"
|
||||||
|
"media_streams"
|
||||||
|
"taggings"
|
||||||
|
"tags"
|
||||||
|
)
|
||||||
|
|
||||||
|
log_info "Attempting to extract critical tables..."
|
||||||
|
|
||||||
|
for table in "${critical_tables[@]}"; do
|
||||||
|
log_info "Extracting table: $table"
|
||||||
|
|
||||||
|
# Try to extract with LIMIT to avoid hitting corrupted data
|
||||||
|
local extract_success=false
|
||||||
|
local limit=10000
|
||||||
|
|
||||||
|
while [ $limit -le 100000 ] && [ "$extract_success" = false ]; do
|
||||||
|
if sudo "$PLEX_SQLITE" "$db_file" "SELECT COUNT(*) FROM $table;" >/dev/null 2>&1; then
|
||||||
|
# Table exists and is readable
|
||||||
|
{
|
||||||
|
echo ""
|
||||||
|
echo "-- Data for table $table (limited to $limit rows)"
|
||||||
|
echo "DELETE FROM $table;"
|
||||||
|
} >> "$partial_sql"
|
||||||
|
|
||||||
|
if sudo "$PLEX_SQLITE" "$db_file" ".mode insert $table" >>/dev/null 2>&1 && \
|
||||||
|
sudo "$PLEX_SQLITE" "$db_file" "SELECT * FROM $table LIMIT $limit;" >> "$partial_sql" 2>/dev/null; then
|
||||||
|
local row_count=$(tail -n +3 "$partial_sql" | grep "INSERT INTO $table" | wc -l)
|
||||||
|
log_success "Extracted $row_count rows from $table"
|
||||||
|
extract_success=true
|
||||||
|
else
|
||||||
|
log_warning "Failed to extract $table with limit $limit, trying smaller limit"
|
||||||
|
limit=$((limit / 2))
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_warning "Table $table is not accessible or doesn't exist"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ "$extract_success" = false ]; then
|
||||||
|
log_warning "Could not extract any data from table $table"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Create new database from partial data
|
||||||
|
if [ -f "$partial_sql" ] && [ -s "$partial_sql" ]; then
|
||||||
|
log_info "Creating database from partial extraction..."
|
||||||
|
|
||||||
|
if sqlite3 "$new_db" < "$partial_sql" 2>/dev/null; then
|
||||||
|
log_success "Partial database created successfully"
|
||||||
|
|
||||||
|
# Verify basic functionality
|
||||||
|
if sqlite3 "$new_db" "PRAGMA integrity_check;" | grep -q "ok"; then
|
||||||
|
log_success "Partial database integrity verified"
|
||||||
|
|
||||||
|
# Replace original with partial database
|
||||||
|
if sudo mv "$db_file" "${db_file}.corrupted" && sudo mv "$new_db" "$db_file"; then
|
||||||
|
sudo chown plex:plex "$db_file"
|
||||||
|
sudo chmod 644 "$db_file"
|
||||||
|
log_success "Database partially recovered - some data may be lost"
|
||||||
|
log_warning "Please verify your Plex library after recovery"
|
||||||
|
|
||||||
|
# Clean up
|
||||||
|
rm -f "$partial_sql"
|
||||||
|
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
log_error "Failed to replace original database"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "Partial database failed integrity check"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "Failed to create database from partial extraction"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "Partial extraction SQL file is empty"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Clean up on failure
|
||||||
|
rm -f "$partial_sql" "$new_db"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Recovery Method 3: Emergency data extraction
|
||||||
|
recovery_method_emergency_extraction() {
|
||||||
|
local db_file="$1"
|
||||||
|
local db_name=$(basename "$db_file")
|
||||||
|
|
||||||
|
log_info "Recovery Method 3: Emergency data extraction for $db_name"
|
||||||
|
|
||||||
|
if [ "$DRY_RUN" = true ]; then
|
||||||
|
log_info "DRY RUN: Would attempt emergency extraction method"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_warning "This method will create a minimal database with basic library structure"
|
||||||
|
log_warning "You will likely need to re-scan your media libraries"
|
||||||
|
|
||||||
|
if [ "$AUTO_MODE" = false ]; then
|
||||||
|
read -p "Continue with emergency extraction? This will lose most metadata [y/N]: " -n 1 -r
|
||||||
|
echo
|
||||||
|
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||||
|
log_info "Emergency extraction cancelled by user"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
local emergency_db="${db_file}.emergency"
|
||||||
|
|
||||||
|
# Create a minimal database with essential tables
|
||||||
|
log_info "Creating minimal emergency database..."
|
||||||
|
|
||||||
|
cat > "/tmp/emergency_schema.sql" << 'EOF'
|
||||||
|
-- Emergency Plex database schema (minimal)
|
||||||
|
CREATE TABLE accounts (
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
name TEXT,
|
||||||
|
hashed_password TEXT,
|
||||||
|
salt TEXT,
|
||||||
|
created_at DATETIME,
|
||||||
|
updated_at DATETIME
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE library_sections (
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
name TEXT,
|
||||||
|
section_type INTEGER,
|
||||||
|
agent TEXT,
|
||||||
|
scanner TEXT,
|
||||||
|
language TEXT,
|
||||||
|
created_at DATETIME,
|
||||||
|
updated_at DATETIME
|
||||||
|
);
|
||||||
|
|
||||||
|
CREATE TABLE directories (
|
||||||
|
id INTEGER PRIMARY KEY,
|
||||||
|
library_section_id INTEGER,
|
||||||
|
path TEXT,
|
||||||
|
created_at DATETIME,
|
||||||
|
updated_at DATETIME
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Insert default admin account
|
||||||
|
INSERT INTO accounts (id, name, created_at, updated_at)
|
||||||
|
VALUES (1, 'plex', datetime('now'), datetime('now'));
|
||||||
|
EOF
|
||||||
|
|
||||||
|
if sqlite3 "$emergency_db" < "/tmp/emergency_schema.sql" 2>/dev/null; then
|
||||||
|
log_success "Emergency database created"
|
||||||
|
|
||||||
|
# Replace original with emergency database
|
||||||
|
if sudo mv "$db_file" "${db_file}.corrupted" && sudo mv "$emergency_db" "$db_file"; then
|
||||||
|
sudo chown plex:plex "$db_file"
|
||||||
|
sudo chmod 644 "$db_file"
|
||||||
|
log_success "Emergency database installed"
|
||||||
|
log_warning "You will need to re-add library sections and re-scan media"
|
||||||
|
|
||||||
|
# Clean up
|
||||||
|
rm -f "/tmp/emergency_schema.sql"
|
||||||
|
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
log_error "Failed to install emergency database"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "Failed to create emergency database"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Clean up on failure
|
||||||
|
rm -f "/tmp/emergency_schema.sql" "$emergency_db"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Recovery Method 4: Restore from backup
|
||||||
|
recovery_method_backup_restore() {
|
||||||
|
local db_file="$1"
|
||||||
|
local backup_dir="/mnt/share/media/backups/plex"
|
||||||
|
|
||||||
|
log_info "Recovery Method 4: Restore from most recent backup"
|
||||||
|
|
||||||
|
if [ "$DRY_RUN" = true ]; then
|
||||||
|
log_info "DRY RUN: Would attempt backup restoration"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Find most recent backup
|
||||||
|
local latest_backup=$(find "$backup_dir" -maxdepth 1 -name "plex-backup-*.tar.gz" -type f 2>/dev/null | sort -r | head -1)
|
||||||
|
|
||||||
|
if [ -z "$latest_backup" ]; then
|
||||||
|
log_error "No backup files found in $backup_dir"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_info "Found latest backup: $(basename "$latest_backup")"
|
||||||
|
|
||||||
|
if [ "$AUTO_MODE" = false ]; then
|
||||||
|
read -p "Restore from backup $(basename "$latest_backup")? [y/N]: " -n 1 -r
|
||||||
|
echo
|
||||||
|
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||||
|
log_info "Backup restoration cancelled by user"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Extract and restore database from backup
|
||||||
|
local temp_extract="/tmp/plex-recovery-extract-$(date +%Y%m%d_%H%M%S)"
|
||||||
|
mkdir -p "$temp_extract"
|
||||||
|
|
||||||
|
log_info "Extracting backup..."
|
||||||
|
if tar -xzf "$latest_backup" -C "$temp_extract" 2>/dev/null; then
|
||||||
|
local backup_db_file="$temp_extract/$(basename "$db_file")"
|
||||||
|
|
||||||
|
if [ -f "$backup_db_file" ]; then
|
||||||
|
# Verify backup database integrity
|
||||||
|
if sqlite3 "$backup_db_file" "PRAGMA integrity_check;" | grep -q "ok"; then
|
||||||
|
log_success "Backup database integrity verified"
|
||||||
|
|
||||||
|
# Replace corrupted database with backup
|
||||||
|
if sudo mv "$db_file" "${db_file}.corrupted" && sudo cp "$backup_db_file" "$db_file"; then
|
||||||
|
sudo chown plex:plex "$db_file"
|
||||||
|
sudo chmod 644 "$db_file"
|
||||||
|
log_success "Database restored from backup"
|
||||||
|
|
||||||
|
# Clean up
|
||||||
|
rm -rf "$temp_extract"
|
||||||
|
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
log_error "Failed to replace database with backup"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "Backup database also has integrity issues"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "Database file not found in backup"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_error "Failed to extract backup"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Clean up on failure
|
||||||
|
rm -rf "$temp_extract"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main recovery function
|
||||||
|
main_recovery() {
|
||||||
|
local db_file="$PLEX_DB_DIR/$MAIN_DB"
|
||||||
|
|
||||||
|
log_info "Starting Plex database recovery process"
|
||||||
|
log_info "Recovery log: $RECOVERY_LOG"
|
||||||
|
|
||||||
|
# Check dependencies
|
||||||
|
if ! check_dependencies; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Stop Plex service
|
||||||
|
if ! stop_plex_service; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Change to database directory
|
||||||
|
cd "$PLEX_DB_DIR" || {
|
||||||
|
log_error "Failed to change to database directory"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check if database exists
|
||||||
|
if [ ! -f "$MAIN_DB" ]; then
|
||||||
|
log_error "Main database file not found: $MAIN_DB"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create backup of current corrupted state
|
||||||
|
log_info "Creating backup of current corrupted database..."
|
||||||
|
if [ "$DRY_RUN" = false ]; then
|
||||||
|
sudo cp "$MAIN_DB" "${MAIN_DB}.${BACKUP_SUFFIX}"
|
||||||
|
log_success "Corrupted database backed up as: ${MAIN_DB}.${BACKUP_SUFFIX}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check current integrity
|
||||||
|
log_info "Verifying database corruption..."
|
||||||
|
if check_database_integrity "$MAIN_DB"; then
|
||||||
|
log_success "Database integrity check passed - no recovery needed!"
|
||||||
|
start_plex_service
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_warning "Database corruption confirmed, attempting recovery..."
|
||||||
|
|
||||||
|
# Try recovery methods in order
|
||||||
|
local recovery_methods=(
|
||||||
|
"recovery_method_sqlite_recover"
|
||||||
|
"recovery_method_partial_extraction"
|
||||||
|
"recovery_method_emergency_extraction"
|
||||||
|
"recovery_method_backup_restore"
|
||||||
|
)
|
||||||
|
|
||||||
|
for method in "${recovery_methods[@]}"; do
|
||||||
|
log_info "Attempting: $method"
|
||||||
|
|
||||||
|
if $method "$MAIN_DB"; then
|
||||||
|
log_success "Recovery successful using: $method"
|
||||||
|
|
||||||
|
# Verify the recovered database
|
||||||
|
if check_database_integrity "$MAIN_DB"; then
|
||||||
|
log_success "Recovered database integrity verified"
|
||||||
|
start_plex_service
|
||||||
|
log_success "Database recovery completed successfully!"
|
||||||
|
log_info "Please check your Plex server and verify your libraries"
|
||||||
|
exit 0
|
||||||
|
else
|
||||||
|
log_error "Recovered database still has integrity issues"
|
||||||
|
# Restore backup for next attempt
|
||||||
|
if [ "$DRY_RUN" = false ]; then
|
||||||
|
sudo cp "${MAIN_DB}.${BACKUP_SUFFIX}" "$MAIN_DB"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_warning "Recovery method failed: $method"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
log_error "All recovery methods failed"
|
||||||
|
log_error "Manual intervention required"
|
||||||
|
|
||||||
|
# Restore original corrupted database
|
||||||
|
if [ "$DRY_RUN" = false ]; then
|
||||||
|
sudo cp "${MAIN_DB}.${BACKUP_SUFFIX}" "$MAIN_DB"
|
||||||
|
fi
|
||||||
|
|
||||||
|
start_plex_service
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Trap to ensure Plex service is restarted
|
||||||
|
trap 'start_plex_service' EXIT
|
||||||
|
|
||||||
|
# Run main recovery
|
||||||
|
main_recovery "$@"
|
||||||
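For reference, recovery method 1 in the script above reduces to dumping whatever SQLite can still read and rebuilding a fresh database from that dump. A minimal sketch of the flow (requires a sqlite3 build that provides the .recover command, 3.37.0 or newer; file names are placeholders, not a drop-in replacement for the script):

#!/bin/bash
# Sketch of the .recover flow used by recovery method 1.
db="com.plexapp.plugins.library.db"            # corrupted database (placeholder)
recovered_sql="${db}.recovered.sql"
new_db="${db}.recovered"

sqlite3 "$db" ".recover" > "$recovered_sql"    # salvage readable data as SQL
sqlite3 "$new_db" < "$recovered_sql"           # rebuild a fresh database from it
sqlite3 "$new_db" "PRAGMA integrity_check;"    # expect a single line: ok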
@@ -87,12 +87,17 @@ log_warning() {

# List available backups
list_backups() {
    log_message "Available backups:"
    find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort -r | while read backup_file; do
    find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort -r | while read -r backup_file; do
        local backup_name=$(basename "$backup_file")
        local backup_date=$(echo "$backup_name" | sed 's/plex-backup-\([0-9]\{8\}\)_[0-9]\{6\}\.tar\.gz/\1/')
        local backup_name
        backup_name=$(basename "$backup_file")
        local backup_date
        backup_date=${backup_name#plex-backup-}
        backup_date=${backup_date%_*.tar.gz}
        if [[ "$backup_date" =~ ^[0-9]{8}$ ]]; then
            local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Unknown date")
            local file_size=$(du -h "$backup_file" 2>/dev/null | cut -f1)
            local readable_date
            readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Unknown date")
            local file_size
            file_size=$(du -h "$backup_file" 2>/dev/null | cut -f1)
            echo " $backup_name ($readable_date) - $file_size"
        else
            echo " $backup_name - $(du -h "$backup_file" 2>/dev/null | cut -f1)"
@@ -117,7 +122,7 @@ validate_backup() {

        # List contents to verify expected files are present
        log_message "Archive contents:"
        tar -tzf "$backup_file" | while read file; do
        tar -tzf "$backup_file" | while read -r file; do
            log_success " Found: $file"
        done
        return 0
@@ -129,7 +134,8 @@ validate_backup() {

# Create backup of current Plex data
backup_current_data() {
    local backup_suffix=$(date '+%Y%m%d_%H%M%S')
    local backup_suffix
    backup_suffix=$(date '+%Y%m%d_%H%M%S')
    local current_backup_dir="$SCRIPT_DIR/plex_current_backup_$backup_suffix"

    log_message "Creating backup of current Plex data..."
@@ -162,7 +168,8 @@ restore_files() {
    fi

    # Create temporary extraction directory
    local temp_dir="/tmp/plex-restore-$(date +%Y%m%d_%H%M%S)"
    local temp_dir
    temp_dir="/tmp/plex-restore-$(date +%Y%m%d_%H%M%S)"
    mkdir -p "$temp_dir"

    log_message "Extracting backup archive..."
@@ -277,8 +284,8 @@ main() {
    manage_plex_service stop

    # Backup current data
    local current_backup=$(backup_current_data)
    local current_backup
    if [ $? -ne 0 ]; then
    if ! current_backup=$(backup_current_data); then
        log_error "Failed to backup current data"
        manage_plex_service start
        exit 1
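Most of the changes in these hunks are the standard ShellCheck SC2155 fix: with `local var=$(cmd)` on one line, the exit status of the command substitution is swallowed by `local`, which itself always succeeds, so the split declaration/assignment keeps the status observable. (The `read` to `read -r` changes are the related SC2162 fix, preventing backslash mangling in filenames.) A small self-contained sketch of the SC2155 point, using `false` as a stand-in for any command that can fail:

#!/bin/bash
sc2155_demo() {
    # Combined form: $? reports the status of `local`, not of the substitution.
    local masked=$(false)
    echo "combined form reports: $?"   # prints 0 even though the command failed

    # Split form: the assignment's own status survives and can be acted on.
    local split
    split=$(false)
    echo "split form reports: $?"      # prints 1
}
sc2155_demo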
306  plex/restore-plex.sh.sc2162_backup  Executable file
@@ -0,0 +1,306 @@
#!/bin/bash

################################################################################
# Plex Media Server Backup Restoration Script
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Safe and reliable restoration script for Plex Media Server
#              backups with validation, dry-run capability, and automatic
#              backup of current data before restoration.
#
# Features:
#   - Interactive backup selection from available archives
#   - Backup validation before restoration
#   - Dry-run mode for testing restoration process
#   - Automatic backup of current data before restoration
#   - Service management (stop/start Plex during restoration)
#   - Comprehensive logging and error handling
#   - File ownership and permission restoration
#
# Related Scripts:
#   - backup-plex.sh: Creates backups that this script restores
#   - validate-plex-backups.sh: Validates backup integrity
#   - monitor-plex-backup.sh: Monitors backup system health
#   - test-plex-backup.sh: Tests backup/restore operations
#   - plex.sh: General Plex service management
#
# Usage:
#   ./restore-plex.sh                                     # List available backups
#   ./restore-plex.sh plex-backup-20250125_143022.tar.gz  # Restore specific backup
#   ./restore-plex.sh --dry-run backup-file.tar.gz        # Test restoration process
#   ./restore-plex.sh --list                              # List all available backups
#
# Dependencies:
#   - tar (for archive extraction)
#   - Plex Media Server
#   - systemctl (for service management)
#   - Access to backup directory
#
# Exit Codes:
#   0 - Success
#   1 - General error
#   2 - Backup file not found or invalid
#   3 - Service management failure
#   4 - Restoration failure
#
################################################################################

# Plex Backup Restoration Script
# Usage: ./restore-plex.sh [backup_date] [--dry-run]

set -e
|
||||||
|
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
NC='\033[0m'
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
|
||||||
|
BACKUP_ROOT="/mnt/share/media/backups/plex"
|
||||||
|
PLEX_DATA_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server"
|
||||||
|
|
||||||
|
# Plex file locations
|
||||||
|
declare -A RESTORE_LOCATIONS=(
|
||||||
|
["com.plexapp.plugins.library.db"]="$PLEX_DATA_DIR/Plug-in Support/Databases/"
|
||||||
|
["com.plexapp.plugins.library.blobs.db"]="$PLEX_DATA_DIR/Plug-in Support/Databases/"
|
||||||
|
["Preferences.xml"]="$PLEX_DATA_DIR/"
|
||||||
|
)
|
||||||
|
|
||||||
|
log_message() {
|
||||||
|
echo -e "$(date '+%H:%M:%S') $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_error() {
|
||||||
|
log_message "${RED}ERROR: $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_success() {
|
||||||
|
log_message "${GREEN}SUCCESS: $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_warning() {
|
||||||
|
log_message "${YELLOW}WARNING: $1${NC}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# List available backups
|
||||||
|
list_backups() {
|
||||||
|
log_message "Available backups:"
|
||||||
|
find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort -r | while read backup_file; do
|
||||||
|
local backup_name=$(basename "$backup_file")
|
||||||
|
local backup_date=$(echo "$backup_name" | sed 's/plex-backup-\([0-9]\{8\}\)_[0-9]\{6\}\.tar\.gz/\1/')
|
||||||
|
if [[ "$backup_date" =~ ^[0-9]{8}$ ]]; then
|
||||||
|
local readable_date=$(date -d "${backup_date:0:4}-${backup_date:4:2}-${backup_date:6:2}" '+%B %d, %Y' 2>/dev/null || echo "Unknown date")
|
||||||
|
local file_size=$(du -h "$backup_file" 2>/dev/null | cut -f1)
|
||||||
|
echo " $backup_name ($readable_date) - $file_size"
|
||||||
|
else
|
||||||
|
echo " $backup_name - $(du -h "$backup_file" 2>/dev/null | cut -f1)"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
# Validate backup integrity
|
||||||
|
validate_backup() {
|
||||||
|
local backup_file="$1"
|
||||||
|
|
||||||
|
if [ ! -f "$backup_file" ]; then
|
||||||
|
log_error "Backup file not found: $backup_file"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_message "Validating backup integrity for $(basename "$backup_file")..."
|
||||||
|
|
||||||
|
# Test archive integrity
|
||||||
|
if tar -tzf "$backup_file" >/dev/null 2>&1; then
|
||||||
|
log_success "Archive integrity check passed"
|
||||||
|
|
||||||
|
# List contents to verify expected files are present
|
||||||
|
log_message "Archive contents:"
|
||||||
|
tar -tzf "$backup_file" | while read file; do
|
||||||
|
log_success " Found: $file"
|
||||||
|
done
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
log_error "Archive integrity check failed"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Create backup of current Plex data
|
||||||
|
backup_current_data() {
|
||||||
|
local backup_suffix=$(date '+%Y%m%d_%H%M%S')
|
||||||
|
local current_backup_dir="$SCRIPT_DIR/plex_current_backup_$backup_suffix"
|
||||||
|
|
||||||
|
log_message "Creating backup of current Plex data..."
|
||||||
|
mkdir -p "$current_backup_dir"
|
||||||
|
|
||||||
|
for file in "${!RESTORE_LOCATIONS[@]}"; do
|
||||||
|
local src="${RESTORE_LOCATIONS[$file]}$file"
|
||||||
|
if [ -f "$src" ]; then
|
||||||
|
if sudo cp "$src" "$current_backup_dir/"; then
|
||||||
|
log_success "Backed up current: $file"
|
||||||
|
else
|
||||||
|
log_error "Failed to backup current: $file"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
log_success "Current data backed up to: $current_backup_dir"
|
||||||
|
echo "$current_backup_dir"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Restore files from backup
|
||||||
|
restore_files() {
|
||||||
|
local backup_file="$1"
|
||||||
|
local dry_run="$2"
|
||||||
|
|
||||||
|
if [ ! -f "$backup_file" ]; then
|
||||||
|
log_error "Backup file not found: $backup_file"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create temporary extraction directory
|
||||||
|
local temp_dir="/tmp/plex-restore-$(date +%Y%m%d_%H%M%S)"
|
||||||
|
mkdir -p "$temp_dir"
|
||||||
|
|
||||||
|
log_message "Extracting backup archive..."
|
||||||
|
if ! tar -xzf "$backup_file" -C "$temp_dir"; then
|
||||||
|
log_error "Failed to extract backup archive"
|
||||||
|
rm -rf "$temp_dir"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_message "Restoring files..."
|
||||||
|
local restore_errors=0
|
||||||
|
|
||||||
|
for file in "${!RESTORE_LOCATIONS[@]}"; do
|
||||||
|
local src_file="$temp_dir/$file"
|
||||||
|
local dest_path="${RESTORE_LOCATIONS[$file]}"
|
||||||
|
local dest_file="$dest_path$file"
|
||||||
|
|
||||||
|
if [ -f "$src_file" ]; then
|
||||||
|
if [ "$dry_run" == "true" ]; then
|
||||||
|
log_message "Would restore: $file to $dest_file"
|
||||||
|
else
|
||||||
|
log_message "Restoring: $file"
|
||||||
|
if sudo cp "$src_file" "$dest_file"; then
|
||||||
|
sudo chown plex:plex "$dest_file"
|
||||||
|
log_success "Restored: $file"
|
||||||
|
else
|
||||||
|
log_error "Failed to restore: $file"
|
||||||
|
restore_errors=$((restore_errors + 1))
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log_warning "File not found in backup: $file"
|
||||||
|
restore_errors=$((restore_errors + 1))
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Clean up temporary directory
|
||||||
|
rm -rf "$temp_dir"
|
||||||
|
|
||||||
|
return $restore_errors
|
||||||
|
}
|
||||||
|
|
||||||
|
# Manage Plex service
|
||||||
|
manage_plex_service() {
|
||||||
|
local action="$1"
|
||||||
|
log_message "$action Plex Media Server..."
|
||||||
|
|
||||||
|
case "$action" in
|
||||||
|
"stop")
|
||||||
|
sudo systemctl stop plexmediaserver.service
|
||||||
|
sleep 3
|
||||||
|
log_success "Plex stopped"
|
||||||
|
;;
|
||||||
|
"start")
|
||||||
|
sudo systemctl start plexmediaserver.service
|
||||||
|
sleep 3
|
||||||
|
log_success "Plex started"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main function
|
||||||
|
main() {
|
||||||
|
local backup_file="$1"
|
||||||
|
local dry_run=false
|
||||||
|
|
||||||
|
# Check for dry-run flag
|
||||||
|
if [ "$2" = "--dry-run" ] || [ "$1" = "--dry-run" ]; then
|
||||||
|
dry_run=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# If no backup file provided, list available backups
|
||||||
|
if [ -z "$backup_file" ] || [ "$backup_file" = "--dry-run" ]; then
|
||||||
|
list_backups
|
||||||
|
echo
|
||||||
|
echo "Usage: $0 <backup_file> [--dry-run]"
|
||||||
|
echo "Example: $0 plex-backup-20250125_143022.tar.gz"
|
||||||
|
echo " $0 /mnt/share/media/backups/plex/plex-backup-20250125_143022.tar.gz"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# If relative path, prepend BACKUP_ROOT
|
||||||
|
if [[ "$backup_file" != /* ]]; then
|
||||||
|
backup_file="$BACKUP_ROOT/$backup_file"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Validate backup exists and is complete
|
||||||
|
if ! validate_backup "$backup_file"; then
|
||||||
|
log_error "Backup validation failed"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$dry_run" = "true" ]; then
|
||||||
|
restore_files "$backup_file" true
|
||||||
|
log_message "Dry run completed. No changes were made."
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Confirm restoration
|
||||||
|
echo
|
||||||
|
log_warning "This will restore Plex data from backup $(basename "$backup_file")"
|
||||||
|
log_warning "Current Plex data will be backed up before restoration"
|
||||||
|
read -p "Continue? (y/N): " -n 1 -r
|
||||||
|
echo
|
||||||
|
|
||||||
|
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||||
|
log_message "Restoration cancelled"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Stop Plex service
|
||||||
|
manage_plex_service stop
|
||||||
|
|
||||||
|
# Backup current data
|
||||||
|
local current_backup=$(backup_current_data)
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
log_error "Failed to backup current data"
|
||||||
|
manage_plex_service start
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Restore files
|
||||||
|
if restore_files "$backup_file" false; then
|
||||||
|
log_success "Restoration completed successfully"
|
||||||
|
log_message "Current data backup saved at: $current_backup"
|
||||||
|
else
|
||||||
|
log_error "Restoration failed"
|
||||||
|
manage_plex_service start
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Start Plex service
|
||||||
|
manage_plex_service start
|
||||||
|
|
||||||
|
log_success "Plex restoration completed. Please verify your server is working correctly."
|
||||||
|
}
|
||||||
|
|
||||||
|
# Trap to ensure Plex is restarted on script exit
|
||||||
|
trap 'manage_plex_service start' EXIT
|
||||||
|
|
||||||
|
main "$@"
|
||||||
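Both the recovery and restoration scripts above finish by installing an EXIT trap so Plex is brought back up even if the run aborts partway through. A stripped-down sketch of that safety net (service name as used in these scripts, restoration work elided):

#!/bin/bash
set -e

restart_plex() {
    # Runs on every exit path, including early failures under `set -e`.
    sudo systemctl start plexmediaserver.service
}
trap restart_plex EXIT

sudo systemctl stop plexmediaserver.service
# ... restoration work that may fail goes here ...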
@@ -62,7 +62,6 @@ CYAN='\033[0;36m'
|
|||||||
NC='\033[0m' # No Color
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
# Test configuration
|
# Test configuration
|
||||||
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
|
|
||||||
TEST_DIR="/tmp/plex-backup-test-$(date +%s)"
|
TEST_DIR="/tmp/plex-backup-test-$(date +%s)"
|
||||||
TEST_BACKUP_ROOT="$TEST_DIR/backups"
|
TEST_BACKUP_ROOT="$TEST_DIR/backups"
|
||||||
TEST_LOG_ROOT="$TEST_DIR/logs"
|
TEST_LOG_ROOT="$TEST_DIR/logs"
|
||||||
@@ -76,30 +75,35 @@ declare -a FAILED_TESTS=()
|
|||||||
|
|
||||||
# Logging functions
|
# Logging functions
|
||||||
log_test() {
|
log_test() {
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
local timestamp
|
||||||
|
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
echo -e "${CYAN}[TEST ${timestamp}]${NC} $1"
|
echo -e "${CYAN}[TEST ${timestamp}]${NC} $1"
|
||||||
}
|
}
|
||||||
|
|
||||||
log_pass() {
|
log_pass() {
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
local timestamp
|
||||||
|
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
|
echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
|
||||||
TESTS_PASSED=$((TESTS_PASSED + 1))
|
TESTS_PASSED=$((TESTS_PASSED + 1))
|
||||||
}
|
}
|
||||||
|
|
||||||
log_fail() {
|
log_fail() {
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
local timestamp
|
||||||
|
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
|
echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
|
||||||
TESTS_FAILED=$((TESTS_FAILED + 1))
|
TESTS_FAILED=$((TESTS_FAILED + 1))
|
||||||
FAILED_TESTS+=("$1")
|
FAILED_TESTS+=("$1")
|
||||||
}
|
}
|
||||||
|
|
||||||
log_info() {
|
log_info() {
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
local timestamp
|
||||||
|
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
|
echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
|
||||||
}
|
}
|
||||||
|
|
||||||
log_warn() {
|
log_warn() {
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
local timestamp
|
||||||
|
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
|
echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -124,14 +128,16 @@ record_test_result() {
|
|||||||
local test_name="$1"
|
local test_name="$1"
|
||||||
local status="$2"
|
local status="$2"
|
||||||
local error_message="$3"
|
local error_message="$3"
|
||||||
local timestamp=$(date -Iseconds)
|
local timestamp
|
||||||
|
timestamp=$(date -Iseconds)
|
||||||
|
|
||||||
# Initialize results file if it doesn't exist
|
# Initialize results file if it doesn't exist
|
||||||
if [ ! -f "$TEST_RESULTS_FILE" ]; then
|
if [ ! -f "$TEST_RESULTS_FILE" ]; then
|
||||||
echo "[]" > "$TEST_RESULTS_FILE"
|
echo "[]" > "$TEST_RESULTS_FILE"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
local result=$(jq -n \
|
local result
|
||||||
|
result=$(jq -n \
|
||||||
--arg test_name "$test_name" \
|
--arg test_name "$test_name" \
|
||||||
--arg status "$status" \
|
--arg status "$status" \
|
||||||
--arg error_message "$error_message" \
|
--arg error_message "$error_message" \
|
||||||
@@ -186,7 +192,7 @@ mock_manage_plex_service() {

mock_calculate_checksum() {
    local file="$1"
    echo "$(echo "$file" | md5sum | cut -d' ' -f1)"
    echo "$file" | md5sum | cut -d' ' -f1
    return 0
}
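The change just above removes a useless layer: `echo "$(pipeline)"` only re-prints what the pipeline already writes to stdout (ShellCheck SC2005), so the pipeline can run directly. A before/after sketch with a placeholder path:

file="/tmp/example.txt"                              # placeholder input
echo "$(echo "$file" | md5sum | cut -d' ' -f1)"      # redundant echo/subshell wrapper
echo "$file" | md5sum | cut -d' ' -f1                # equivalent, direct form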
@@ -226,10 +232,12 @@ test_performance_tracking() {
|
|||||||
track_performance_test() {
|
track_performance_test() {
|
||||||
local operation="$1"
|
local operation="$1"
|
||||||
local start_time="$2"
|
local start_time="$2"
|
||||||
local end_time=$(date +%s)
|
local end_time
|
||||||
|
end_time=$(date +%s)
|
||||||
local duration=$((end_time - start_time))
|
local duration=$((end_time - start_time))
|
||||||
|
|
||||||
local entry=$(jq -n \
|
local entry
|
||||||
|
entry=$(jq -n \
|
||||||
--arg operation "$operation" \
|
--arg operation "$operation" \
|
||||||
--arg duration "$duration" \
|
--arg duration "$duration" \
|
||||||
--arg timestamp "$(date -Iseconds)" \
|
--arg timestamp "$(date -Iseconds)" \
|
||||||
@@ -244,12 +252,14 @@ test_performance_tracking() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
# Test tracking
|
# Test tracking
|
||||||
local start_time=$(date +%s)
|
local start_time
|
||||||
|
start_time=$(date +%s)
|
||||||
sleep 1 # Simulate work
|
sleep 1 # Simulate work
|
||||||
track_performance_test "test_operation" "$start_time"
|
track_performance_test "test_operation" "$start_time"
|
||||||
|
|
||||||
# Verify entry was added
|
# Verify entry was added
|
||||||
local entry_count=$(jq length "$test_perf_log")
|
local entry_count
|
||||||
|
entry_count=$(jq length "$test_perf_log")
|
||||||
if [ "$entry_count" -eq 1 ]; then
|
if [ "$entry_count" -eq 1 ]; then
|
||||||
return 0
|
return 0
|
||||||
else
|
else
|
||||||
@@ -297,11 +307,13 @@ test_checksum_caching() {
|
|||||||
calculate_checksum_test() {
|
calculate_checksum_test() {
|
||||||
local file="$1"
|
local file="$1"
|
||||||
local cache_file="${file}.md5"
|
local cache_file="${file}.md5"
|
||||||
local file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
|
local file_mtime
|
||||||
|
file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")
|
||||||
|
|
||||||
# Check cache
|
# Check cache
|
||||||
if [ -f "$cache_file" ]; then
|
if [ -f "$cache_file" ]; then
|
||||||
local cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
|
local cache_mtime
|
||||||
|
cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
|
||||||
if [ "$cache_mtime" -gt "$file_mtime" ]; then
|
if [ "$cache_mtime" -gt "$file_mtime" ]; then
|
||||||
cat "$cache_file"
|
cat "$cache_file"
|
||||||
return 0
|
return 0
|
||||||
@@ -309,16 +321,19 @@ test_checksum_caching() {
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# Calculate and cache
|
# Calculate and cache
|
||||||
local checksum=$(md5sum "$file" | cut -d' ' -f1)
|
local checksum
|
||||||
|
checksum=$(md5sum "$file" | cut -d' ' -f1)
|
||||||
echo "$checksum" > "$cache_file"
|
echo "$checksum" > "$cache_file"
|
||||||
echo "$checksum"
|
echo "$checksum"
|
||||||
}
|
}
|
||||||
|
|
||||||
# First calculation (should create cache)
|
# First calculation (should create cache)
|
||||||
local checksum1=$(calculate_checksum_test "$test_file")
|
local checksum1
|
||||||
|
checksum1=$(calculate_checksum_test "$test_file")
|
||||||
|
|
||||||
# Second calculation (should use cache)
|
# Second calculation (should use cache)
|
||||||
local checksum2=$(calculate_checksum_test "$test_file")
|
local checksum2
|
||||||
|
checksum2=$(calculate_checksum_test "$test_file")
|
||||||
|
|
||||||
# Verify checksums match and cache file exists
|
# Verify checksums match and cache file exists
|
||||||
if [ "$checksum1" = "$checksum2" ] && [ -f "$cache_file" ]; then
|
if [ "$checksum1" = "$checksum2" ] && [ -f "$cache_file" ]; then
|
||||||
@@ -342,8 +357,10 @@ test_backup_verification() {
|
|||||||
local src="$1"
|
local src="$1"
|
||||||
local dest="$2"
|
local dest="$2"
|
||||||
|
|
||||||
local src_checksum=$(md5sum "$src" | cut -d' ' -f1)
|
local src_checksum
|
||||||
local dest_checksum=$(md5sum "$dest" | cut -d' ' -f1)
|
src_checksum=$(md5sum "$src" | cut -d' ' -f1)
|
||||||
|
local dest_checksum
|
||||||
|
dest_checksum=$(md5sum "$dest" | cut -d' ' -f1)
|
||||||
|
|
||||||
if [ "$src_checksum" = "$dest_checksum" ]; then
|
if [ "$src_checksum" = "$dest_checksum" ]; then
|
||||||
return 0
|
return 0
|
||||||
@@ -362,16 +379,17 @@ test_backup_verification() {
|
|||||||
|
|
||||||
# Test: Parallel processing framework
|
# Test: Parallel processing framework
|
||||||
test_parallel_processing() {
|
test_parallel_processing() {
|
||||||
local temp_dir=$(mktemp -d)
|
local temp_dir
|
||||||
|
temp_dir=$(mktemp -d)
|
||||||
local -a pids=()
|
local -a pids=()
|
||||||
local total_jobs=5
|
local total_jobs=5
|
||||||
local completed_jobs=0
|
local completed_jobs=0
|
||||||
|
|
||||||
# Simulate parallel jobs
|
# Simulate parallel jobs
|
||||||
for i in $(seq 1 $total_jobs); do
|
for i in $(seq 1 "$total_jobs"); do
|
||||||
(
|
(
|
||||||
# Simulate work
|
# Simulate work
|
||||||
sleep 0.$i
|
sleep 0."$i"
|
||||||
echo "$i" > "$temp_dir/job_$i.result"
|
echo "$i" > "$temp_dir/job_$i.result"
|
||||||
) &
|
) &
|
||||||
pids+=($!)
|
pids+=($!)
|
||||||
@@ -385,7 +403,8 @@ test_parallel_processing() {
|
|||||||
done
|
done
|
||||||
|
|
||||||
# Verify all jobs completed
|
# Verify all jobs completed
|
||||||
local result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
|
local result_files
|
||||||
|
result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)
|
||||||
|
|
||||||
# Cleanup
|
# Cleanup
|
||||||
rm -rf "$temp_dir"
|
rm -rf "$temp_dir"
|
||||||
@@ -410,7 +429,8 @@ test_database_integrity() {
|
|||||||
local db_file="$1"
|
local db_file="$1"
|
||||||
|
|
||||||
# Use sqlite3 instead of Plex SQLite for testing
|
# Use sqlite3 instead of Plex SQLite for testing
|
||||||
local result=$(sqlite3 "$db_file" "PRAGMA integrity_check;" 2>/dev/null)
|
local result
|
||||||
|
result=$(sqlite3 "$db_file" "PRAGMA integrity_check;" 2>/dev/null)
|
||||||
|
|
||||||
if echo "$result" | grep -q "ok"; then
|
if echo "$result" | grep -q "ok"; then
|
||||||
return 0
|
return 0
|
||||||
@@ -449,7 +469,8 @@ test_configuration_parsing() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
# Test parsing
|
# Test parsing
|
||||||
local result=$(parse_args_test --auto-repair --webhook=http://example.com)
|
local result
|
||||||
|
result=$(parse_args_test --auto-repair --webhook=http://example.com)
|
||||||
|
|
||||||
if echo "$result" | grep -q "true true http://example.com"; then
|
if echo "$result" | grep -q "true true http://example.com"; then
|
||||||
return 0
|
return 0
|
||||||
@@ -523,19 +544,22 @@ run_integration_tests() {
|
|||||||
run_performance_tests() {
|
run_performance_tests() {
|
||||||
log_info "Starting performance benchmarks"
|
log_info "Starting performance benchmarks"
|
||||||
|
|
||||||
local start_time=$(date +%s)
|
local start_time
|
||||||
|
start_time=$(date +%s)
|
||||||
|
|
||||||
# Test file operations
|
# Test file operations
|
||||||
local test_file="$TEST_DIR/perf_test.dat"
|
local test_file="$TEST_DIR/perf_test.dat"
|
||||||
dd if=/dev/zero of="$test_file" bs=1M count=10 2>/dev/null
|
dd if=/dev/zero of="$test_file" bs=1M count=10 2>/dev/null
|
||||||
|
|
||||||
# Benchmark checksum calculation
|
# Benchmark checksum calculation
|
||||||
local checksum_start=$(date +%s)
|
local checksum_start
|
||||||
|
checksum_start=$(date +%s)
|
||||||
md5sum "$test_file" > /dev/null
|
md5sum "$test_file" > /dev/null
|
||||||
local checksum_time=$(($(date +%s) - checksum_start))
|
local checksum_time=$(($(date +%s) - checksum_start))
|
||||||
|
|
||||||
# Benchmark compression
|
# Benchmark compression
|
||||||
local compress_start=$(date +%s)
|
local compress_start
|
||||||
|
compress_start=$(date +%s)
|
||||||
tar -czf "$TEST_DIR/perf_test.tar.gz" -C "$TEST_DIR" "perf_test.dat"
|
tar -czf "$TEST_DIR/perf_test.tar.gz" -C "$TEST_DIR" "perf_test.dat"
|
||||||
local compress_time=$(($(date +%s) - compress_start))
|
local compress_time=$(($(date +%s) - compress_start))
|
||||||
|
|
||||||
@@ -547,7 +571,8 @@ run_performance_tests() {
|
|||||||
log_info " Total benchmark time: ${total_time}s"
|
log_info " Total benchmark time: ${total_time}s"
|
||||||
|
|
||||||
# Record performance data
|
# Record performance data
|
||||||
local perf_entry=$(jq -n \
|
local perf_entry
|
||||||
|
perf_entry=$(jq -n \
|
||||||
--arg checksum_time "$checksum_time" \
|
--arg checksum_time "$checksum_time" \
|
||||||
--arg compress_time "$compress_time" \
|
--arg compress_time "$compress_time" \
|
||||||
--arg total_time "$total_time" \
|
--arg total_time "$total_time" \
|
||||||
@@ -565,7 +590,8 @@ run_performance_tests() {
|
|||||||
|
|
||||||
# Generate comprehensive test report
|
# Generate comprehensive test report
|
||||||
generate_test_report() {
|
generate_test_report() {
|
||||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
local timestamp
|
||||||
|
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
|
||||||
echo
|
echo
|
||||||
echo "=============================================="
|
echo "=============================================="
|
||||||
@@ -601,7 +627,8 @@ generate_test_report() {
|
|||||||
|
|
||||||
# Save detailed results
|
# Save detailed results
|
||||||
if [ -f "$TEST_RESULTS_FILE" ]; then
|
if [ -f "$TEST_RESULTS_FILE" ]; then
|
||||||
local report_file="$TEST_DIR/test_report_$(date +%Y%m%d_%H%M%S).json"
|
local report_file
|
||||||
|
report_file="$TEST_DIR/test_report_$(date +%Y%m%d_%H%M%S).json"
|
||||||
jq -n \
|
jq -n \
|
||||||
--arg timestamp "$timestamp" \
|
--arg timestamp "$timestamp" \
|
||||||
--arg tests_run "$TESTS_RUN" \
|
--arg tests_run "$TESTS_RUN" \
|
||||||
@@ -645,22 +672,27 @@ run_integration_tests() {
|
|||||||
run_performance_tests() {
|
run_performance_tests() {
|
||||||
log_info "Running performance benchmarks..."
|
log_info "Running performance benchmarks..."
|
||||||
|
|
||||||
local start_time=$(date +%s)
|
local start_time
|
||||||
|
start_time=$(date +%s)
|
||||||
|
|
||||||
# Create large test files
|
# Create large test files
|
||||||
local large_file="$TEST_DIR/large_test.db"
|
local large_file="$TEST_DIR/large_test.db"
|
||||||
dd if=/dev/zero of="$large_file" bs=1M count=100 2>/dev/null
|
dd if=/dev/zero of="$large_file" bs=1M count=100 2>/dev/null
|
||||||
|
|
||||||
# Benchmark checksum calculation
|
# Benchmark checksum calculation
|
||||||
local checksum_start=$(date +%s)
|
local checksum_start
|
||||||
|
checksum_start=$(date +%s)
|
||||||
md5sum "$large_file" > /dev/null
|
md5sum "$large_file" > /dev/null
|
||||||
local checksum_end=$(date +%s)
|
local checksum_end
|
||||||
|
checksum_end=$(date +%s)
|
||||||
local checksum_time=$((checksum_end - checksum_start))
|
local checksum_time=$((checksum_end - checksum_start))
|
||||||
|
|
||||||
# Benchmark compression
|
# Benchmark compression
|
||||||
local compress_start=$(date +%s)
|
local compress_start
|
||||||
|
compress_start=$(date +%s)
|
||||||
tar -czf "$TEST_DIR/large_test.tar.gz" -C "$TEST_DIR" "large_test.db"
|
tar -czf "$TEST_DIR/large_test.tar.gz" -C "$TEST_DIR" "large_test.db"
|
||||||
local compress_end=$(date +%s)
|
local compress_end
|
||||||
|
compress_end=$(date +%s)
|
||||||
local compress_time=$((compress_end - compress_start))
|
local compress_time=$((compress_end - compress_start))
|
||||||
|
|
||||||
local total_time=$(($(date +%s) - start_time))
|
local total_time=$(($(date +%s) - start_time))
|
||||||
|
715  plex/test-plex-backup.sh.sc2086_backup  Executable file
@@ -0,0 +1,715 @@
#!/bin/bash

################################################################################
# Plex Backup System Comprehensive Test Suite
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Automated testing framework for the complete Plex backup
#              ecosystem, providing unit tests, integration tests, and
#              end-to-end validation of all backup operations.
#
# Features:
#   - Unit testing for individual backup components
#   - Integration testing for full backup workflows
#   - Database integrity test scenarios
#   - Service management testing
#   - Performance benchmarking
#   - Error condition simulation and recovery testing
#   - Test result reporting and analysis
#
# Related Scripts:
#   - backup-plex.sh: Primary script under test
#   - restore-plex.sh: Restoration testing component
#   - validate-plex-backups.sh: Validation testing
#   - monitor-plex-backup.sh: Monitoring system testing
#   - plex.sh: Service management testing
#
# Usage:
#   ./test-plex-backup.sh                # Run full test suite
#   ./test-plex-backup.sh --unit         # Unit tests only
#   ./test-plex-backup.sh --integration  # Integration tests only
#   ./test-plex-backup.sh --quick        # Quick smoke tests
#   ./test-plex-backup.sh --cleanup      # Clean up test artifacts
#
# Dependencies:
#   - All Plex backup scripts in this directory
#   - sqlite3 or Plex SQLite binary
#   - jq (for JSON processing)
#   - tar (for archive operations)
#   - systemctl (for service testing)
#
# Exit Codes:
#   0 - All tests passed
#   1 - General error
#   2 - Test failures detected
#   3 - Missing dependencies
#   4 - Test setup failure
#
################################################################################

# Comprehensive Plex Backup System Test Suite
# This script provides automated testing for all backup-related functionality

set -e
|
||||||
|
|
||||||
|
# Color codes for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
CYAN='\033[0;36m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Test configuration
|
||||||
|
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
|
||||||
|
TEST_DIR="/tmp/plex-backup-test-$(date +%s)"
|
||||||
|
TEST_BACKUP_ROOT="$TEST_DIR/backups"
|
||||||
|
TEST_LOG_ROOT="$TEST_DIR/logs"
|
||||||
|
TEST_RESULTS_FILE="$TEST_DIR/test-results.json"
|
||||||
|
|
||||||
|
# Test counters
|
||||||
|
TESTS_RUN=0
|
||||||
|
TESTS_PASSED=0
|
||||||
|
TESTS_FAILED=0
|
||||||
|
declare -a FAILED_TESTS=()
|
||||||
|
|
||||||
|
# Logging functions
|
||||||
|
log_test() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
echo -e "${CYAN}[TEST ${timestamp}]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_pass() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
echo -e "${GREEN}[PASS ${timestamp}]${NC} $1"
|
||||||
|
TESTS_PASSED=$((TESTS_PASSED + 1))
|
||||||
|
}
|
||||||
|
|
||||||
|
log_fail() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
echo -e "${RED}[FAIL ${timestamp}]${NC} $1"
|
||||||
|
TESTS_FAILED=$((TESTS_FAILED + 1))
|
||||||
|
FAILED_TESTS+=("$1")
|
||||||
|
}
|
||||||
|
|
||||||
|
log_info() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
echo -e "${BLUE}[INFO ${timestamp}]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
log_warn() {
|
||||||
|
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||||
|
echo -e "${YELLOW}[WARN ${timestamp}]${NC} $1"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test framework functions
|
||||||
|
run_test() {
|
||||||
|
local test_name="$1"
|
||||||
|
local test_function="$2"
|
||||||
|
|
||||||
|
TESTS_RUN=$((TESTS_RUN + 1))
|
||||||
|
log_test "Running: $test_name"
|
||||||
|
|
||||||
|
if $test_function; then
|
||||||
|
log_pass "$test_name"
|
||||||
|
record_test_result "$test_name" "PASS" ""
|
||||||
|
else
|
||||||
|
log_fail "$test_name"
|
||||||
|
record_test_result "$test_name" "FAIL" "Test function returned non-zero exit code"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
record_test_result() {
|
||||||
|
local test_name="$1"
|
||||||
|
local status="$2"
|
||||||
|
local error_message="$3"
|
||||||
|
local timestamp=$(date -Iseconds)
|
||||||
|
|
||||||
|
# Initialize results file if it doesn't exist
|
||||||
|
if [ ! -f "$TEST_RESULTS_FILE" ]; then
|
||||||
|
echo "[]" > "$TEST_RESULTS_FILE"
|
||||||
|
fi
|
||||||
|
|
||||||
|
local result=$(jq -n \
|
||||||
|
--arg test_name "$test_name" \
|
||||||
|
--arg status "$status" \
|
||||||
|
--arg error_message "$error_message" \
|
||||||
|
--arg timestamp "$timestamp" \
|
||||||
|
'{
|
||||||
|
test_name: $test_name,
|
||||||
|
status: $status,
|
||||||
|
error_message: $error_message,
|
||||||
|
timestamp: $timestamp
|
||||||
|
}')
|
||||||
|
|
||||||
|
jq --argjson result "$result" '. += [$result]' "$TEST_RESULTS_FILE" > "${TEST_RESULTS_FILE}.tmp" && \
|
||||||
|
mv "${TEST_RESULTS_FILE}.tmp" "$TEST_RESULTS_FILE"
|
||||||
|
}
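
# Illustrative example (values are made up): after one passing test,
# test-results.json holds a single entry shaped roughly like
#   [{"test_name":"JSON Log Initialization","status":"PASS","error_message":"","timestamp":"2025-06-05T06:00:00-04:00"}]
# and each later run_test call appends another object to that array.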

# Setup test environment
setup_test_environment() {
    log_info "Setting up test environment in $TEST_DIR"

    # Create test directories
    mkdir -p "$TEST_DIR"
    mkdir -p "$TEST_BACKUP_ROOT"
    mkdir -p "$TEST_LOG_ROOT"
    mkdir -p "$TEST_DIR/mock_plex"

    # Create mock Plex files for testing
    echo "PRAGMA user_version=1;" > "$TEST_DIR/mock_plex/com.plexapp.plugins.library.db"
    echo "PRAGMA user_version=1;" > "$TEST_DIR/mock_plex/com.plexapp.plugins.library.blobs.db"
    dd if=/dev/zero of="$TEST_DIR/mock_plex/Preferences.xml" bs=1024 count=1 2>/dev/null

    # Create mock performance log
    echo "[]" > "$TEST_DIR/mock-performance.json"
    echo "{}" > "$TEST_DIR/mock-backup.json"

    log_info "Test environment setup complete"
}

# Cleanup test environment
cleanup_test_environment() {
    if [ -d "$TEST_DIR" ]; then
        log_info "Cleaning up test environment"
        rm -rf "$TEST_DIR"
    fi
}

# Mock functions to replace actual backup script functions
mock_manage_plex_service() {
    local action="$1"
    echo "Mock: Plex service $action"
    return 0
}

mock_calculate_checksum() {
    local file="$1"
    echo "$(echo "$file" | md5sum | cut -d' ' -f1)"
    return 0
}

mock_verify_backup() {
    local src="$1"
    local dest="$2"
    # Always return success for testing
    return 0
}

# Test: JSON log initialization
test_json_log_initialization() {
    local test_log="$TEST_DIR/test-init.json"

    # Remove file if it exists
    rm -f "$test_log"

    # Test initialization
    if [ ! -f "$test_log" ] || ! jq empty "$test_log" 2>/dev/null; then
        echo "{}" > "$test_log"
    fi

    # Verify file exists and is valid JSON
    if [ -f "$test_log" ] && jq empty "$test_log" 2>/dev/null; then
        return 0
    else
        return 1
    fi
}

# Test: Performance tracking
test_performance_tracking() {
    local test_perf_log="$TEST_DIR/test-performance.json"
    echo "[]" > "$test_perf_log"

    # Mock performance tracking function
    track_performance_test() {
        local operation="$1"
        local start_time="$2"
        local end_time=$(date +%s)
        local duration=$((end_time - start_time))

        local entry=$(jq -n \
            --arg operation "$operation" \
            --arg duration "$duration" \
            --arg timestamp "$(date -Iseconds)" \
            '{
                operation: $operation,
                duration_seconds: ($duration | tonumber),
                timestamp: $timestamp
            }')

        jq --argjson entry "$entry" '. += [$entry]' "$test_perf_log" > "${test_perf_log}.tmp" && \
            mv "${test_perf_log}.tmp" "$test_perf_log"
    }

    # Test tracking
    local start_time=$(date +%s)
    sleep 1 # Simulate work
    track_performance_test "test_operation" "$start_time"

    # Verify entry was added
    local entry_count=$(jq length "$test_perf_log")
    if [ "$entry_count" -eq 1 ]; then
        return 0
    else
        return 1
    fi
}

# Test: Notification system
test_notification_system() {
    # Mock notification function
    send_notification_test() {
        local title="$1"
        local message="$2"
        local status="${3:-info}"

        # Just verify parameters are received correctly
        if [ -n "$title" ] && [ -n "$message" ]; then
            echo "Notification: $title - $message ($status)" > "$TEST_DIR/notification.log"
            return 0
        else
            return 1
        fi
    }

    # Test notification
    send_notification_test "Test Title" "Test Message" "success"

    # Verify notification was processed
    if [ -f "$TEST_DIR/notification.log" ] && grep -q "Test Title" "$TEST_DIR/notification.log"; then
        return 0
    else
        return 1
    fi
}

# Test: Checksum caching
test_checksum_caching() {
    local test_file="$TEST_DIR/checksum_test.txt"
    local cache_file="${test_file}.md5"

    # Create test file
    echo "test content" > "$test_file"

    # Mock checksum function with caching
    calculate_checksum_test() {
        local file="$1"
        local cache_file="${file}.md5"
        local file_mtime=$(stat -c %Y "$file" 2>/dev/null || echo "0")

        # Check cache
        if [ -f "$cache_file" ]; then
            local cache_mtime=$(stat -c %Y "$cache_file" 2>/dev/null || echo "0")
            if [ "$cache_mtime" -gt "$file_mtime" ]; then
                cat "$cache_file"
                return 0
            fi
        fi

        # Calculate and cache
        local checksum=$(md5sum "$file" | cut -d' ' -f1)
        echo "$checksum" > "$cache_file"
        echo "$checksum"
    }

    # First calculation (should create cache)
    local checksum1=$(calculate_checksum_test "$test_file")

    # Second calculation (should use cache)
    local checksum2=$(calculate_checksum_test "$test_file")

    # Verify checksums match and cache file exists
    if [ "$checksum1" = "$checksum2" ] && [ -f "$cache_file" ]; then
        return 0
    else
        return 1
    fi
}
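
# Note: the cache above is keyed purely on mtime; the .md5 sidecar is trusted
# only while it is newer than the data file, so touching or rewriting the data
# file forces the next calculate_checksum_test call to re-hash it.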

# Test: Backup verification
test_backup_verification() {
    local src_file="$TEST_DIR/source.txt"
    local dest_file="$TEST_DIR/backup.txt"

    # Create identical files
    echo "backup test content" > "$src_file"
    cp "$src_file" "$dest_file"

    # Mock verification function
    verify_backup_test() {
        local src="$1"
        local dest="$2"

        local src_checksum=$(md5sum "$src" | cut -d' ' -f1)
        local dest_checksum=$(md5sum "$dest" | cut -d' ' -f1)

        if [ "$src_checksum" = "$dest_checksum" ]; then
            return 0
        else
            return 1
        fi
    }

    # Test verification
    if verify_backup_test "$src_file" "$dest_file"; then
        return 0
    else
        return 1
    fi
}

# Test: Parallel processing framework
test_parallel_processing() {
    local temp_dir=$(mktemp -d)
    local -a pids=()
    local total_jobs=5
    local completed_jobs=0

    # Simulate parallel jobs
    for i in $(seq 1 $total_jobs); do
        (
            # Simulate work
            sleep 0.$i
            echo "$i" > "$temp_dir/job_$i.result"
        ) &
        pids+=($!)
    done

    # Wait for all jobs
    for pid in "${pids[@]}"; do
        if wait "$pid"; then
            completed_jobs=$((completed_jobs + 1))
        fi
    done

    # Verify all jobs completed
    local result_files=$(find "$temp_dir" -name "job_*.result" | wc -l)

    # Cleanup
    rm -rf "$temp_dir"

    if [ "$completed_jobs" -eq "$total_jobs" ] && [ "$result_files" -eq "$total_jobs" ]; then
        return 0
    else
        return 1
    fi
}
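
# Note: the fan-out/fan-in pattern above (launch subshells with &, collect each
# $! into an array, then wait on every PID) presumably mirrors the parallel
# framework in the backup script; wait "$pid" returns that job's exit status,
# which is why completed_jobs only counts workers that succeeded.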

# Test: Database integrity check simulation
test_database_integrity() {
    local test_db="$TEST_DIR/test.db"

    # Create a simple SQLite database
    sqlite3 "$test_db" "CREATE TABLE test (id INTEGER, name TEXT);"
    sqlite3 "$test_db" "INSERT INTO test VALUES (1, 'test');"

    # Mock integrity check
    check_integrity_test() {
        local db_file="$1"

        # Use sqlite3 instead of Plex SQLite for testing
        local result=$(sqlite3 "$db_file" "PRAGMA integrity_check;" 2>/dev/null)

        if echo "$result" | grep -q "ok"; then
            return 0
        else
            return 1
        fi
    }

    # Test integrity check
    if check_integrity_test "$test_db"; then
        return 0
    else
        return 1
    fi
}
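
# Note: PRAGMA integrity_check returns the single row "ok" when a database is
# healthy, which is what the grep above keys on; the production scripts
# presumably run the same pragma through the bundled Plex SQLite binary.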

# Test: Configuration parsing
test_configuration_parsing() {
    # Mock command line parsing
    parse_args_test() {
        local args=("$@")
        local auto_repair=false
        local parallel=true
        local webhook=""

        for arg in "${args[@]}"; do
            case "$arg" in
                --auto-repair) auto_repair=true ;;
                --no-parallel) parallel=false ;;
                --webhook=*) webhook="${arg#*=}" ;;
            esac
        done

        # Return parsed values
        echo "$auto_repair $parallel $webhook"
    }

    # Test parsing
    local result=$(parse_args_test --auto-repair --webhook=http://example.com)

    if echo "$result" | grep -q "true true http://example.com"; then
        return 0
    else
        return 1
    fi
}

# Test: Error handling
test_error_handling() {
    # Mock function that can fail
    test_function_with_error() {
        local should_fail="$1"

        if [ "$should_fail" = "true" ]; then
            return 1
        else
            return 0
        fi
    }

    # Test success case
    if test_function_with_error "false"; then
        # Test failure case
        if ! test_function_with_error "true"; then
            return 0 # Both cases worked as expected
        fi
    fi

    return 1
}

# Run all unit tests
run_all_tests() {
    log_info "Setting up test environment"
    setup_test_environment

    log_info "Starting unit tests"

    # Core functionality tests
    run_test "JSON Log Initialization" test_json_log_initialization
    run_test "Performance Tracking" test_performance_tracking
    run_test "Notification System" test_notification_system
    run_test "Checksum Caching" test_checksum_caching
    run_test "Backup Verification" test_backup_verification
    run_test "Parallel Processing" test_parallel_processing
    run_test "Database Integrity Check" test_database_integrity
    run_test "Configuration Parsing" test_configuration_parsing
    run_test "Error Handling" test_error_handling

    log_info "Unit tests completed"
}

# Run integration tests (requires actual Plex environment)
run_integration_tests() {
    log_info "Starting integration tests"
    log_warn "Integration tests require a working Plex installation"

    # Check if Plex service exists
    if ! systemctl list-units --all | grep -q plexmediaserver; then
        log_warn "Plex service not found - skipping integration tests"
        return 0
    fi

    # Test actual service management (if safe to do so)
    log_info "Integration tests would test actual Plex service management"
    log_info "Skipping for safety - implement with caution"
}

# Run performance tests
run_performance_tests() {
    log_info "Starting performance benchmarks"

    local start_time=$(date +%s)

    # Test file operations
    local test_file="$TEST_DIR/perf_test.dat"
    dd if=/dev/zero of="$test_file" bs=1M count=10 2>/dev/null

    # Benchmark checksum calculation
    local checksum_start=$(date +%s)
    md5sum "$test_file" > /dev/null
    local checksum_time=$(($(date +%s) - checksum_start))

    # Benchmark compression
    local compress_start=$(date +%s)
    tar -czf "$TEST_DIR/perf_test.tar.gz" -C "$TEST_DIR" "perf_test.dat"
    local compress_time=$(($(date +%s) - compress_start))

    local total_time=$(($(date +%s) - start_time))

    log_info "Performance Results:"
    log_info "  Checksum (10MB): ${checksum_time}s"
    log_info "  Compression (10MB): ${compress_time}s"
    log_info "  Total benchmark time: ${total_time}s"

    # Record performance data
    local perf_entry=$(jq -n \
        --arg checksum_time "$checksum_time" \
        --arg compress_time "$compress_time" \
        --arg total_time "$total_time" \
        --arg timestamp "$(date -Iseconds)" \
        '{
            benchmark: "performance_test",
            checksum_time_seconds: ($checksum_time | tonumber),
            compress_time_seconds: ($compress_time | tonumber),
            total_time_seconds: ($total_time | tonumber),
            timestamp: $timestamp
        }')

    echo "$perf_entry" > "$TEST_DIR/performance_results.json"
}

# Generate comprehensive test report
generate_test_report() {
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')

    echo
    echo "=============================================="
    echo "        PLEX BACKUP TEST REPORT"
    echo "=============================================="
    echo "Test Run: $timestamp"
    echo "Tests Run: $TESTS_RUN"
    echo "Tests Passed: $TESTS_PASSED"
    echo "Tests Failed: $TESTS_FAILED"
    echo

    if [ $TESTS_FAILED -gt 0 ]; then
        echo "FAILED TESTS:"
        for failed_test in "${FAILED_TESTS[@]}"; do
            echo "  - $failed_test"
        done
        echo
    fi

    local success_rate=0
    if [ $TESTS_RUN -gt 0 ]; then
        success_rate=$(( (TESTS_PASSED * 100) / TESTS_RUN ))
    fi

    echo "Success Rate: ${success_rate}%"
    echo

    if [ $TESTS_FAILED -eq 0 ]; then
        log_pass "All tests passed successfully!"
    else
        log_fail "Some tests failed - review output above"
    fi

    # Save detailed results
    if [ -f "$TEST_RESULTS_FILE" ]; then
        local report_file="$TEST_DIR/test_report_$(date +%Y%m%d_%H%M%S).json"
        jq -n \
            --arg timestamp "$timestamp" \
            --arg tests_run "$TESTS_RUN" \
            --arg tests_passed "$TESTS_PASSED" \
            --arg tests_failed "$TESTS_FAILED" \
            --arg success_rate "$success_rate" \
            --argjson failed_tests "$(printf '%s\n' "${FAILED_TESTS[@]}" | jq -R . | jq -s .)" \
            --argjson test_details "$(cat "$TEST_RESULTS_FILE")" \
            '{
                test_run_timestamp: $timestamp,
                summary: {
                    tests_run: ($tests_run | tonumber),
                    tests_passed: ($tests_passed | tonumber),
                    tests_failed: ($tests_failed | tonumber),
                    success_rate_percent: ($success_rate | tonumber)
                },
                failed_tests: $failed_tests,
                detailed_results: $test_details
            }' > "$report_file"

        log_info "Detailed test report saved to: $report_file"
    fi
}
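
# NOTE: the two functions below redefine run_integration_tests and
# run_performance_tests from earlier in this file; bash uses the most recent
# definition, so these are the versions main() actually dispatches to.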

# Integration tests (if requested)
run_integration_tests() {
    log_info "Running integration tests..."

    # Note: These would require actual Plex installation
    # For now, we'll just indicate what would be tested

    log_warn "Integration tests require running Plex Media Server"
    log_warn "These tests would cover:"
    log_warn "  - Service stop/start functionality"
    log_warn "  - Database integrity checks"
    log_warn "  - Full backup and restore cycles"
    log_warn "  - Performance under load"
}

# Performance benchmarks
run_performance_tests() {
    log_info "Running performance benchmarks..."

    local start_time=$(date +%s)

    # Create large test files
    local large_file="$TEST_DIR/large_test.db"
    dd if=/dev/zero of="$large_file" bs=1M count=100 2>/dev/null

    # Benchmark checksum calculation
    local checksum_start=$(date +%s)
    md5sum "$large_file" > /dev/null
    local checksum_end=$(date +%s)
    local checksum_time=$((checksum_end - checksum_start))

    # Benchmark compression
    local compress_start=$(date +%s)
    tar -czf "$TEST_DIR/large_test.tar.gz" -C "$TEST_DIR" "large_test.db"
    local compress_end=$(date +%s)
    local compress_time=$((compress_end - compress_start))

    local total_time=$(($(date +%s) - start_time))

    log_info "Performance Results:"
    log_info "  Checksum (100MB): ${checksum_time}s"
    log_info "  Compression (100MB): ${compress_time}s"
    log_info "  Total benchmark time: ${total_time}s"
}

# Main execution
main() {
    case "${1:-all}" in
        "unit")
            run_all_tests
            ;;
        "integration")
            run_integration_tests
            ;;
        "performance")
            run_performance_tests
            ;;
        "all")
            run_all_tests
            # Uncomment for integration tests if environment supports it
            # run_integration_tests
            run_performance_tests
            ;;
        *)
            echo "Usage: $0 [unit|integration|performance|all]"
            echo "  unit        - Run unit tests only"
            echo "  integration - Run integration tests (requires Plex)"
            echo "  performance - Run performance benchmarks"
            echo "  all         - Run all available tests"
            exit 1
            ;;
    esac

    generate_test_report

    # Exit with appropriate code
    if [ $TESTS_FAILED -gt 0 ]; then
        exit 1
    else
        exit 0
    fi
}

# Trap to ensure cleanup on exit
trap cleanup_test_environment EXIT

main "$@"
@@ -110,7 +110,8 @@ log_info() {
 
 # Log synchronization functions
 sync_logs_to_shared() {
-    local sync_start_time=$(date +%s)
+    local sync_start_time
+    sync_start_time=$(date +%s)
     log_info "Starting log synchronization to shared location"
 
     # Ensure shared log directory exists
@@ -131,7 +132,8 @@ sync_logs_to_shared() {
 
     for log_file in "$LOCAL_LOG_ROOT"/*.log; do
         if [ -f "$log_file" ]; then
-            local filename=$(basename "$log_file")
+            local filename
+            filename=$(basename "$log_file")
             local shared_file="$SHARED_LOG_ROOT/$filename"
 
             # Only copy if file doesn't exist in shared location or local is newer
@@ -147,7 +149,8 @@ sync_logs_to_shared() {
         fi
     done
 
-    local sync_end_time=$(date +%s)
+    local sync_end_time
+    sync_end_time=$(date +%s)
     local sync_duration=$((sync_end_time - sync_start_time))
 
     if [ $error_count -eq 0 ]; then
@@ -161,7 +164,8 @@ sync_logs_to_shared() {
 
 # Cleanup old local logs (30 day retention)
 cleanup_old_local_logs() {
-    local cleanup_start_time=$(date +%s)
+    local cleanup_start_time
+    cleanup_start_time=$(date +%s)
     log_info "Starting cleanup of old local logs (30+ days)"
 
     if [ ! -d "$LOCAL_LOG_ROOT" ]; then
@@ -174,7 +178,8 @@ cleanup_old_local_logs() {
 
     # Find and remove log files older than 30 days
     while IFS= read -r -d '' old_file; do
-        local filename=$(basename "$old_file")
+        local filename
+        filename=$(basename "$old_file")
         if rm "$old_file" 2>/dev/null; then
             ((cleanup_count++))
             log_info "Removed old log: $filename"
@@ -184,7 +189,8 @@ cleanup_old_local_logs() {
         fi
     done < <(find "$LOCAL_LOG_ROOT" -name "*.log" -mtime +30 -print0 2>/dev/null)
 
-    local cleanup_end_time=$(date +%s)
+    local cleanup_end_time
+    cleanup_end_time=$(date +%s)
     local cleanup_duration=$((cleanup_end_time - cleanup_start_time))
 
     if [ $cleanup_count -gt 0 ]; then
@@ -235,7 +241,8 @@ validate_backup_structure() {
         return 1
     fi
 
-    local backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
+    local backup_count
+    backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | wc -l)
     log_info "Found $backup_count backup files"
 
     if [ "$backup_count" -eq 0 ]; then
@@ -249,7 +256,8 @@ validate_backup_structure() {
 # Validate individual backup
 validate_backup() {
     local backup_file="$1"
-    local backup_name=$(basename "$backup_file")
+    local backup_name
+    backup_name=$(basename "$backup_file")
     local errors=0
 
     log_info "Validating backup: $backup_name"
@@ -268,7 +276,8 @@ validate_backup() {
     log_success "Archive integrity check passed: $backup_name"
 
     # Check for expected files in archive
-    local archive_contents=$(tar -tzf "$backup_file" 2>/dev/null)
+    local archive_contents
+    archive_contents=$(tar -tzf "$backup_file" 2>/dev/null)
 
     # Check if this is a legacy backup with dated subdirectory
     local has_dated_subdir=false
@@ -300,8 +309,11 @@ validate_backup() {
             log_success "  Found: $file"
         else
             # Check if this is an optional file that might not exist in older backups
-            local backup_name=$(basename "$backup_file")
-            local backup_datetime=$(echo "$backup_name" | sed 's/plex-backup-\([0-9]\{8\}_[0-9]\{6\}\)\.tar\.gz/\1/')
+            local backup_name
+            backup_name=$(basename "$backup_file")
+            local backup_datetime
+            backup_datetime=${backup_name#plex-backup-}
+            backup_datetime=${backup_datetime%.tar.gz}
 
             if [[ -n "${OPTIONAL_FILES[$file]}" ]] && [[ "$backup_datetime" < "${OPTIONAL_FILES[$file]}" ]]; then
                 log_warning "  Missing file (expected for backup date): $file"
@@ -327,7 +339,7 @@ validate_backup() {
             # Legacy format: extract filename from dated subdirectory
             filename="${BASH_REMATCH[1]}"
             # Remove timestamp suffix if present
-            filename=$(echo "$filename" | sed 's/\.[0-9]\{8\}_[0-9]\{6\}$//')
+            filename=${filename%.*_*}
         elif [[ "$line" =~ ^\./(.+)$ ]]; then
             # New format: extract filename from ./ prefix
             filename="${BASH_REMATCH[1]}"
@@ -365,16 +377,20 @@ validate_backup() {
 check_backup_freshness() {
     log_info "Checking backup freshness..."
 
-    local latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
+    local latest_backup
+    latest_backup=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | sort | tail -1)
 
     if [ -z "$latest_backup" ]; then
         log_error "No backups found"
         return 1
     fi
 
-    local backup_filename=$(basename "$latest_backup")
+    local backup_filename
+    backup_filename=$(basename "$latest_backup")
     # Extract date from filename: plex-backup-YYYYMMDD_HHMMSS.tar.gz
-    local backup_datetime=$(echo "$backup_filename" | sed 's/plex-backup-\([0-9]\{8\}_[0-9]\{6\}\)\.tar\.gz/\1/')
+    local backup_datetime
+    backup_datetime=${backup_filename#plex-backup-}
+    backup_datetime=${backup_datetime%.tar.gz}
 
     # Validate that we extracted a valid datetime
     if [[ ! "$backup_datetime" =~ ^[0-9]{8}_[0-9]{6}$ ]]; then
@@ -390,7 +406,8 @@ check_backup_freshness() {
         return 1
     fi
 
-    local current_timestamp=$(date +%s)
+    local current_timestamp
+    current_timestamp=$(date +%s)
     local age_days=$(( (current_timestamp - backup_timestamp) / 86400 ))
 
     log_info "Latest backup: $backup_datetime ($age_days days old)"
@@ -427,7 +444,8 @@ check_backup_sizes() {
     while IFS= read -r backup_file; do
         if [ -f "$backup_file" ] && [ -r "$backup_file" ]; then
             backup_files+=("$backup_file")
-            local size=$(stat -f%z "$backup_file" 2>/dev/null || stat -c%s "$backup_file" 2>/dev/null || echo "0")
+            local size
+            size=$(stat -f%z "$backup_file" 2>/dev/null || stat -c%s "$backup_file" 2>/dev/null || echo "0")
             backup_sizes+=("$size")
             total_size=$((total_size + size))
         fi
@@ -440,8 +458,10 @@ check_backup_sizes() {
 
     # Calculate average size
     local avg_size=$((total_size / ${#backup_files[@]}))
-    local human_total=$(numfmt --to=iec "$total_size" 2>/dev/null || echo "${total_size} bytes")
-    local human_avg=$(numfmt --to=iec "$avg_size" 2>/dev/null || echo "${avg_size} bytes")
+    local human_total
+    human_total=$(numfmt --to=iec "$total_size" 2>/dev/null || echo "${total_size} bytes")
+    local human_avg
+    human_avg=$(numfmt --to=iec "$avg_size" 2>/dev/null || echo "${avg_size} bytes")
 
     log_info "Total backup size: $human_total"
     log_info "Average backup size: $human_avg"
@@ -453,13 +473,17 @@ check_backup_sizes() {
     for i in "${!backup_files[@]}"; do
         local file="${backup_files[$i]}"
         local size="${backup_sizes[$i]}"
-        local filename=$(basename "$file")
+        local filename
+        filename=$(basename "$file")
 
         if [ "$size" -lt "$min_size" ] && [ "$size" -gt 0 ]; then
-            local human_size=$(numfmt --to=iec "$size" 2>/dev/null || echo "${size} bytes")
+            local human_size
+            human_size=$(numfmt --to=iec "$size" 2>/dev/null || echo "${size} bytes")
 
             # Extract backup datetime to check if it's a pre-blobs backup
-            local backup_datetime=$(echo "$filename" | sed 's/plex-backup-\([0-9]\{8\}_[0-9]\{6\}\)\.tar\.gz/\1/')
+            local backup_datetime
+            backup_datetime=${filename#plex-backup-}
+            backup_datetime=${backup_datetime%.tar.gz}
             if [[ "$backup_datetime" =~ ^[0-9]{8}_[0-9]{6}$ ]] && [[ "$backup_datetime" < "20250526_144500" ]]; then
                 log_info "Small backup (pre-blobs DB): $filename ($human_size)"
                 log_info "  This backup predates the blobs database introduction, size is expected"
@@ -484,9 +508,12 @@ check_backup_sizes() {
 check_disk_space() {
     log_info "Checking disk space..."
 
-    local backup_disk_usage=$(du -sh "$BACKUP_ROOT" | cut -f1)
-    local available_space=$(df -h "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
-    local used_percentage=$(df "$BACKUP_ROOT" | awk 'NR==2 {print $5}' | sed 's/%//')
+    local backup_disk_usage
+    backup_disk_usage=$(du -sh "$BACKUP_ROOT" | cut -f1)
+    local available_space
+    available_space=$(df -h "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
+    local used_percentage
+    used_percentage=$(df "$BACKUP_ROOT" | awk 'NR==2 {print $5}' | sed 's/%//')
 
     log_info "Backup disk usage: $backup_disk_usage"
     log_info "Available space: $available_space"
@@ -513,10 +540,12 @@ generate_report() {
     local total_errors=0
 
     # Header
-    echo "==================================" >> "$REPORT_FILE"
-    echo "Plex Backup Validation Report" >> "$REPORT_FILE"
-    echo "Generated: $(date)" >> "$REPORT_FILE"
-    echo "==================================" >> "$REPORT_FILE"
+    {
+        echo "=================================="
+        echo "Plex Backup Validation Report"
+        echo "Generated: $(date)"
+        echo "=================================="
+    } >> "$REPORT_FILE"
 
     # Use process substitution to avoid subshell variable scope issues
     while IFS= read -r backup_file; do
@@ -532,11 +561,13 @@ generate_report() {
     done < <(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" | sort)
 
     # Summary
-    echo >> "$REPORT_FILE"
-    echo "Summary:" >> "$REPORT_FILE"
-    echo "  Total backups: $total_backups" >> "$REPORT_FILE"
-    echo "  Valid backups: $valid_backups" >> "$REPORT_FILE"
-    echo "  Total errors: $total_errors" >> "$REPORT_FILE"
+    {
+        echo
+        echo "Summary:"
+        echo "  Total backups: $total_backups"
+        echo "  Valid backups: $valid_backups"
+        echo "  Total errors: $total_errors"
+    } >> "$REPORT_FILE"
 
     log_success "Report generated: $REPORT_FILE"
 }
@@ -546,7 +577,8 @@ fix_issues() {
     log_info "Attempting to fix common issues..."
 
     # Create corrupted backups directory
-    local corrupted_dir="$(dirname "$REPORT_FILE")/corrupted-backups"
+    local corrupted_dir
+    corrupted_dir="$(dirname "$REPORT_FILE")/corrupted-backups"
     mkdir -p "$corrupted_dir"
 
     # Check for and move corrupted backup files using process substitution
@@ -554,7 +586,8 @@ fix_issues() {
     while IFS= read -r backup_file; do
         if ! tar -tzf "$backup_file" >/dev/null 2>&1; then
             log_warning "Found corrupted backup: $(basename "$backup_file")"
-            local backup_name=$(basename "$backup_file")
+            local backup_name
+            backup_name=$(basename "$backup_file")
             local corrupted_backup="$corrupted_dir/$backup_name"
 
             if mv "$backup_file" "$corrupted_backup"; then
@@ -650,9 +683,9 @@ main() {
 
     # Check backup freshness
     if ! check_backup_freshness; then
-        local freshness_result=$?
         # Check if this is a "no backups found" error vs "old backup" warning
-        local backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | wc -l)
+        local backup_count
+        backup_count=$(find "$BACKUP_ROOT" -maxdepth 1 -type f -name "plex-backup-*.tar.gz" 2>/dev/null | wc -l)
        if [ "$backup_count" -eq 0 ]; then
            # No backups found - critical error
            critical_errors=$((critical_errors + 1))
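
Aside: every hunk above makes the same mechanical change, splitting `local var=$(command)` into a declaration followed by a separate assignment. Beyond the readability motive given in the commit message, the usual reason for this split (ShellCheck SC2155) is that `local` masks the command's exit status. A minimal sketch using a hypothetical helper, not taken from the repo:

    broken() { local size=$(stat -c%s /no/such/file); echo "status=$?"; }   # prints status=0 even though stat failed
    fixed()  { local size; size=$(stat -c%s /no/such/file) || return 1; }   # stat's failure is visible and propagated
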
@@ -82,11 +82,13 @@ check_service_status() {
         print_status "$GREEN" "✓ Plex Media Server is running"
 
         # Get service uptime
-        local uptime=$(systemctl show plexmediaserver --property=ActiveEnterTimestamp --value)
+        local uptime
+        uptime=$(systemctl show plexmediaserver --property=ActiveEnterTimestamp --value)
         print_status "$GREEN" "  Started: $uptime"
 
         # Get memory usage
-        local memory=$(systemctl show plexmediaserver --property=MemoryCurrent --value)
+        local memory
+        memory=$(systemctl show plexmediaserver --property=MemoryCurrent --value)
         if [[ -n "$memory" && "$memory" != "[not set]" ]]; then
             local memory_mb=$((memory / 1024 / 1024))
             print_status "$GREEN" "  Memory usage: ${memory_mb}MB"
@@ -109,12 +111,14 @@ check_database_integrity() {
 
     # Check main database
     if [[ -f "$main_db" ]]; then
-        local main_size=$(du -h "$main_db" | cut -f1)
+        local main_size
+        main_size=$(du -h "$main_db" | cut -f1)
         print_status "$GREEN" "✓ Main database exists (${main_size})"
 
         # Try basic database operations
         if sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" >/dev/null 2>&1; then
-            local table_count=$(sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null)
+            local table_count
+            table_count=$(sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null)
             print_status "$GREEN" "  Contains $table_count tables"
         else
             print_status "$YELLOW" "  Warning: Cannot query database tables"
@@ -127,13 +131,15 @@ check_database_integrity() {
 
     # Check blobs database
     if [[ -f "$blobs_db" ]]; then
-        local blobs_size=$(du -h "$blobs_db" | cut -f1)
+        local blobs_size
+        blobs_size=$(du -h "$blobs_db" | cut -f1)
         print_status "$GREEN" "✓ Blobs database exists (${blobs_size})"
 
         # Check if it's not empty (previous corruption was 0 bytes)
-        local blobs_bytes=$(stat -c%s "$blobs_db" 2>/dev/null || stat -f%z "$blobs_db" 2>/dev/null)
+        local blobs_bytes
+        blobs_bytes=$(stat -c%s "$blobs_db" 2>/dev/null || stat -f%z "$blobs_db" 2>/dev/null)
         if [[ $blobs_bytes -gt 1000000 ]]; then
-            print_status "$GREEN" "  File size is healthy ($(numfmt --to=iec $blobs_bytes))"
+            print_status "$GREEN" "  File size is healthy ($(numfmt --to=iec "$blobs_bytes"))"
         else
             print_status "$RED" "  Warning: File size is too small ($blobs_bytes bytes)"
             all_good=false
@@ -144,8 +150,10 @@ check_database_integrity() {
     fi
 
     # Check file ownership
-    local main_owner=$(stat -c%U:%G "$main_db" 2>/dev/null)
-    local blobs_owner=$(stat -c%U:%G "$blobs_db" 2>/dev/null)
+    local main_owner
+    main_owner=$(stat -c%U:%G "$main_db" 2>/dev/null)
+    local blobs_owner
+    blobs_owner=$(stat -c%U:%G "$blobs_db" 2>/dev/null)
 
     if [[ "$main_owner" == "plex:plex" && "$blobs_owner" == "plex:plex" ]]; then
         print_status "$GREEN" "✓ Database ownership is correct (plex:plex)"
@@ -154,7 +162,11 @@ check_database_integrity() {
         print_status "$YELLOW" "  Main DB: $main_owner, Blobs DB: $blobs_owner"
     fi
 
-    return $([[ "$all_good" == "true" ]] && echo 0 || echo 1)
+    if [[ "$all_good" == "true" ]]; then
+        return 0
+    else
+        return 1
+    fi
 }
 
 # Check web interface
@@ -185,7 +197,8 @@ check_api_functionality() {
     print_header "API FUNCTIONALITY CHECK"
 
     # Test root API endpoint
-    local api_response=$(curl -s "http://localhost:32400/" 2>/dev/null)
+    local api_response
+    api_response=$(curl -s "http://localhost:32400/" 2>/dev/null)
 
     if echo "$api_response" | grep -q "Unauthorized\|web/index.html"; then
         print_status "$GREEN" "✓ API is responding (redirect to web interface)"
@@ -194,7 +207,8 @@ check_api_functionality() {
     fi
 
     # Try to get server identity (this might work without auth)
-    local identity_response=$(curl -s "http://localhost:32400/identity" 2>/dev/null)
+    local identity_response
+    identity_response=$(curl -s "http://localhost:32400/identity" 2>/dev/null)
 
     if echo "$identity_response" | grep -q "MediaContainer"; then
         print_status "$GREEN" "✓ Server identity endpoint working"
plex/validate-plex-recovery.sh.sc2086_backup (new executable file, 272 lines)
@@ -0,0 +1,272 @@
#!/bin/bash

################################################################################
# Plex Recovery Validation Script
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Comprehensive validation script that verifies the success of
#              Plex database recovery operations. Performs extensive checks
#              on database integrity, service functionality, and system health
#              to ensure complete recovery and operational readiness.
#
# Features:
# - Database integrity verification
# - Service functionality testing
# - Library accessibility checks
# - Performance validation
# - Web interface connectivity testing
# - Comprehensive recovery reporting
# - Post-recovery optimization suggestions
#
# Related Scripts:
# - recover-plex-database.sh: Primary recovery script validated by this tool
# - icu-aware-recovery.sh: ICU recovery validation
# - nuclear-plex-recovery.sh: Nuclear recovery validation
# - backup-plex.sh: Backup system that enables recovery
# - validate-plex-backups.sh: Backup validation tools
# - plex.sh: General Plex service management
#
# Usage:
#   ./validate-plex-recovery.sh               # Full validation suite
#   ./validate-plex-recovery.sh --quick       # Quick validation checks
#   ./validate-plex-recovery.sh --detailed    # Detailed analysis and reporting
#   ./validate-plex-recovery.sh --performance # Performance validation only
#
# Dependencies:
# - sqlite3 or Plex SQLite binary
# - curl (for web interface testing)
# - systemctl (for service status checks)
# - Plex Media Server
#
# Exit Codes:
#   0 - Recovery validation successful
#   1 - General error
#   2 - Database validation failures
#   3 - Service functionality issues
#   4 - Performance concerns detected
#   5 - Partial recovery (requires attention)
#
################################################################################

# Final Plex Recovery Validation Script
# Comprehensive check to ensure Plex is fully recovered and functional

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

PLEX_DB_DIR="/var/lib/plexmediaserver/Library/Application Support/Plex Media Server/Plug-in Support/Databases"

print_status() {
    local color="$1"
    local message="$2"
    echo -e "${color}${message}${NC}"
}

print_header() {
    echo
    print_status "$BLUE" "================================"
    print_status "$BLUE" "$1"
    print_status "$BLUE" "================================"
}

# Check service status
check_service_status() {
    print_header "SERVICE STATUS CHECK"

    if systemctl is-active --quiet plexmediaserver; then
        print_status "$GREEN" "✓ Plex Media Server is running"

        # Get service uptime
        local uptime=$(systemctl show plexmediaserver --property=ActiveEnterTimestamp --value)
        print_status "$GREEN" "  Started: $uptime"

        # Get memory usage
        local memory=$(systemctl show plexmediaserver --property=MemoryCurrent --value)
        if [[ -n "$memory" && "$memory" != "[not set]" ]]; then
            local memory_mb=$((memory / 1024 / 1024))
            print_status "$GREEN" "  Memory usage: ${memory_mb}MB"
        fi

        return 0
    else
        print_status "$RED" "✗ Plex Media Server is not running"
        return 1
    fi
}

# Check database integrity
check_database_integrity() {
    print_header "DATABASE INTEGRITY CHECK"

    local main_db="${PLEX_DB_DIR}/com.plexapp.plugins.library.db"
    local blobs_db="${PLEX_DB_DIR}/com.plexapp.plugins.library.blobs.db"
    local all_good=true

    # Check main database
    if [[ -f "$main_db" ]]; then
        local main_size=$(du -h "$main_db" | cut -f1)
        print_status "$GREEN" "✓ Main database exists (${main_size})"

        # Try basic database operations
        if sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" >/dev/null 2>&1; then
            local table_count=$(sqlite3 "$main_db" "SELECT COUNT(*) FROM sqlite_master WHERE type='table';" 2>/dev/null)
            print_status "$GREEN" "  Contains $table_count tables"
        else
            print_status "$YELLOW" "  Warning: Cannot query database tables"
            all_good=false
        fi
    else
        print_status "$RED" "✗ Main database missing"
        all_good=false
    fi

    # Check blobs database
    if [[ -f "$blobs_db" ]]; then
        local blobs_size=$(du -h "$blobs_db" | cut -f1)
        print_status "$GREEN" "✓ Blobs database exists (${blobs_size})"

        # Check if it's not empty (previous corruption was 0 bytes)
        local blobs_bytes=$(stat -c%s "$blobs_db" 2>/dev/null || stat -f%z "$blobs_db" 2>/dev/null)
        if [[ $blobs_bytes -gt 1000000 ]]; then
            print_status "$GREEN" "  File size is healthy ($(numfmt --to=iec $blobs_bytes))"
        else
            print_status "$RED" "  Warning: File size is too small ($blobs_bytes bytes)"
            all_good=false
        fi
    else
        print_status "$RED" "✗ Blobs database missing"
        all_good=false
    fi

    # Check file ownership
    local main_owner=$(stat -c%U:%G "$main_db" 2>/dev/null)
    local blobs_owner=$(stat -c%U:%G "$blobs_db" 2>/dev/null)

    if [[ "$main_owner" == "plex:plex" && "$blobs_owner" == "plex:plex" ]]; then
        print_status "$GREEN" "✓ Database ownership is correct (plex:plex)"
    else
        print_status "$YELLOW" "  Warning: Ownership issues detected"
        print_status "$YELLOW" "  Main DB: $main_owner, Blobs DB: $blobs_owner"
    fi

    return $([[ "$all_good" == "true" ]] && echo 0 || echo 1)
}

# Check web interface
check_web_interface() {
    print_header "WEB INTERFACE CHECK"

    local max_attempts=5
    local attempt=1

    while [[ $attempt -le $max_attempts ]]; do
        if curl -s -o /dev/null -w "%{http_code}" "http://localhost:32400/web/index.html" | grep -q "200"; then
            print_status "$GREEN" "✓ Web interface is accessible"
            print_status "$GREEN" "  URL: http://localhost:32400"
            return 0
        fi

        print_status "$YELLOW" "  Attempt $attempt/$max_attempts: Web interface not ready..."
        sleep 2
        ((attempt++))
    done

    print_status "$RED" "✗ Web interface is not accessible"
    return 1
}
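
# Note: the loop above polls the web UI up to max_attempts times with a short
# sleep between tries, so a slow service start shows up as a few warnings
# rather than an immediate failure.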

# Check API functionality
check_api_functionality() {
    print_header "API FUNCTIONALITY CHECK"

    # Test root API endpoint
    local api_response=$(curl -s "http://localhost:32400/" 2>/dev/null)

    if echo "$api_response" | grep -q "Unauthorized\|web/index.html"; then
        print_status "$GREEN" "✓ API is responding (redirect to web interface)"
    else
        print_status "$YELLOW" "  Warning: Unexpected API response"
    fi

    # Try to get server identity (this might work without auth)
    local identity_response=$(curl -s "http://localhost:32400/identity" 2>/dev/null)

    if echo "$identity_response" | grep -q "MediaContainer"; then
        print_status "$GREEN" "✓ Server identity endpoint working"
    else
        print_status "$YELLOW" "  Note: Server identity requires authentication"
    fi
}

# Check recent logs for errors
check_recent_logs() {
    print_header "RECENT LOGS CHECK"

    # Check for recent errors in systemd logs
    local recent_errors=$(sudo journalctl -u plexmediaserver --since "5 minutes ago" --no-pager -q 2>/dev/null | grep -i "error\|fail\|exception" | head -3)

    if [[ -z "$recent_errors" ]]; then
        print_status "$GREEN" "✓ No recent errors in service logs"
    else
        print_status "$YELLOW" "  Recent log entries found:"
        echo "$recent_errors" | while read -r line; do
            print_status "$YELLOW" "    $line"
        done
    fi
}

# Show recovery summary
show_recovery_summary() {
    print_header "RECOVERY SUMMARY"

    local corrupted_backup_dir="${PLEX_DB_DIR}/corrupted-20250605_060232"
    if [[ -d "$corrupted_backup_dir" ]]; then
        print_status "$GREEN" "✓ Corrupted databases backed up to:"
        print_status "$GREEN" "  $corrupted_backup_dir"
    fi

    print_status "$GREEN" "✓ Databases restored from: 2025-06-02 backups"
    print_status "$GREEN" "✓ File ownership corrected to plex:plex"
    print_status "$GREEN" "✓ Service restarted successfully"

    echo
    print_status "$BLUE" "NEXT STEPS:"
    print_status "$YELLOW" "1. Access Plex at: http://localhost:32400"
    print_status "$YELLOW" "2. Verify your libraries are intact"
    print_status "$YELLOW" "3. Consider running a library scan to pick up recent changes"
    print_status "$YELLOW" "4. Monitor the service for a few days to ensure stability"
}

# Main function
main() {
    print_status "$BLUE" "PLEX RECOVERY VALIDATION"
    print_status "$BLUE" "$(date)"
    echo

    local overall_status=0

    check_service_status || overall_status=1
    check_database_integrity || overall_status=1
    check_web_interface || overall_status=1
    check_api_functionality
    check_recent_logs
    show_recovery_summary

    echo
    if [[ $overall_status -eq 0 ]]; then
        print_status "$GREEN" "🎉 RECOVERY SUCCESSFUL! Plex Media Server is fully functional."
    else
        print_status "$YELLOW" "⚠️  RECOVERY PARTIALLY SUCCESSFUL - Some issues detected."
        print_status "$YELLOW" "   Plex is running but may need additional attention."
    fi

    return $overall_status
}

# Run the validation
main "$@"
setup/run-docker-tests.sh.sc2086_backup (new executable file, 240 lines)
@@ -0,0 +1,240 @@
#!/bin/bash
|
||||||
|
|
||||||
|
# Script to run setup tests in Docker containers
|
||||||
|
# This allows testing the setup process in isolated environments
|
||||||
|
|
||||||
|
set -e # Exit on error
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[0;33m'
|
||||||
|
RED='\033[0;31m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m' # No Color
|
||||||
|
|
||||||
|
# Check if Docker is installed and working
|
||||||
|
if ! command -v docker &> /dev/null; then
|
||||||
|
echo -e "${RED}Error: Docker is not installed. Please install Docker to run tests.${NC}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create logs directory at the top level to ensure it exists
|
||||||
|
LOGS_DIR="$(pwd)/logs"
|
||||||
|
if [ ! -d "$LOGS_DIR" ]; then
|
||||||
|
echo -e "${YELLOW}Creating logs directory at: $LOGS_DIR${NC}"
|
||||||
|
mkdir -p "$LOGS_DIR" || {
|
||||||
|
echo -e "${RED}Failed to create logs directory! Check permissions.${NC}"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
else
|
||||||
|
echo -e "${GREEN}Logs directory already exists at: $LOGS_DIR${NC}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Ensure the logs directory is writable
|
||||||
|
if [ ! -w "$LOGS_DIR" ]; then
|
||||||
|
echo -e "${YELLOW}Setting permissions on logs directory...${NC}"
|
||||||
|
chmod -R 755 "$LOGS_DIR" && \
|
||||||
|
find "$LOGS_DIR" -type f -exec chmod 644 {} \; || {
|
||||||
|
echo -e "${RED}Failed to set write permissions on logs directory!${NC}"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create a test file to verify we can write to it
|
||||||
|
if touch "$LOGS_DIR/test_file" && rm "$LOGS_DIR/test_file"; then
|
||||||
|
echo -e "${GREEN}Log directory is writable and ready for use${NC}"
|
||||||
|
else
|
||||||
|
echo -e "${RED}Cannot write to logs directory even after setting permissions!${NC}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if Docker is running
if ! docker info &>/dev/null; then
    echo -e "${YELLOW}Warning: Docker appears to be installed but not running or not properly configured.${NC}"
    echo -e "${YELLOW}If using WSL2, ensure Docker Desktop is running with WSL integration enabled.${NC}"
    echo -e "${YELLOW}Would you like to run the local test instead? [Y/n]${NC}"
    read -r response
    if [[ "$response" =~ ^([nN][oO]|[nN])$ ]]; then
        echo -e "${RED}Exiting...${NC}"
        exit 1
    else
        echo -e "${BLUE}Running local test instead...${NC}"
        ./test-setup.sh
        exit $?
    fi
fi
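# Note: the prompt above treats anything other than an explicit n/no reply
# (including just pressing Enter) as "yes" and falls back to ./test-setup.sh.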

# Build and run Ubuntu test container
run_ubuntu_test() {
    echo -e "\n${BLUE}=== Running test in Ubuntu container ===${NC}"
    # Create the logs directory if it doesn't exist
    local log_dir="$(pwd)/logs"
    mkdir -p "$log_dir" || true

    # Use sudo for chmod only if necessary
    if [ ! -w "$log_dir" ]; then
        echo -e "${YELLOW}Attempting to fix permissions with sudo...${NC}"
        sudo chmod -R 755 "$log_dir" 2>/dev/null && \
            sudo find "$log_dir" -type f -exec chmod 644 {} \; 2>/dev/null || {
            echo -e "${YELLOW}Could not change permissions with sudo, continuing anyway...${NC}"
        }
    fi

    echo -e "${YELLOW}Logs will be saved to: $log_dir${NC}"
    echo -e "${YELLOW}Building Ubuntu test container...${NC}"
    docker build --target ubuntu-test -t shell-test:ubuntu .

    echo -e "${GREEN}Running tests with package installation...${NC}"

    # Create a timestamp for this test run
    TEST_TIMESTAMP=$(date +"%Y%m%d-%H%M%S")
    echo -e "${YELLOW}Test run timestamp: $TEST_TIMESTAMP${NC}"

    # Run container with proper volume mount and add environment variable for timestamp
    docker run --rm -it \
        -e TEST_TIMESTAMP="$TEST_TIMESTAMP" \
        -e CONTAINER_TYPE="ubuntu" \
        -v "$log_dir:/logs:z" \
        shell-test:ubuntu

    # Check if logs were created
    if ls "$log_dir"/setup-test-*"$TEST_TIMESTAMP"* &>/dev/null 2>&1; then
        echo -e "${GREEN}Test logs successfully created in host directory${NC}"
    else
        echo -e "${YELLOW}Warning: No log files found matching timestamp $TEST_TIMESTAMP${NC}"
        echo -e "${YELLOW}This may indicate issues with volume mounting or permissions${NC}"
        echo -e "${YELLOW}Contents of log directory:${NC}"
        ls -la "$log_dir" || echo "Cannot list directory contents"
    fi

    echo -e "${BLUE}Test completed. Check logs in $log_dir directory${NC}"
}

# Build and run Debian test container
run_debian_test() {
    echo -e "\n${BLUE}=== Running test in Debian container ===${NC}"
    # Create the logs directory if it doesn't exist
    local log_dir="$(pwd)/logs"
    mkdir -p "$log_dir" || true

    # Use sudo for chmod only if necessary
    if [ ! -w "$log_dir" ]; then
        echo -e "${YELLOW}Attempting to fix permissions with sudo...${NC}"
        sudo chmod -R 755 "$log_dir" 2>/dev/null && \
            sudo find "$log_dir" -type f -exec chmod 644 {} \; 2>/dev/null || {
            echo -e "${YELLOW}Could not change permissions with sudo, continuing anyway...${NC}"
        }
    fi

    echo -e "${YELLOW}Logs will be saved to: $log_dir${NC}"
    echo -e "${YELLOW}Building Debian test container...${NC}"
    docker build --target debian-test -t shell-test:debian .

    echo -e "${GREEN}Running tests with package installation...${NC}"

    # Create a timestamp for this test run
    TEST_TIMESTAMP=$(date +"%Y%m%d-%H%M%S")
    echo -e "${YELLOW}Test run timestamp: $TEST_TIMESTAMP${NC}"

    # Run container with proper volume mount and add environment variable for timestamp
    docker run --rm -it \
        -e TEST_TIMESTAMP="$TEST_TIMESTAMP" \
        -e CONTAINER_TYPE="debian" \
        -v "$log_dir:/logs:z" \
        shell-test:debian

    # Check if logs were created
    if ls "$log_dir"/setup-test-*"$TEST_TIMESTAMP"* &>/dev/null 2>&1; then
        echo -e "${GREEN}Test logs successfully created in host directory${NC}"
    else
        echo -e "${YELLOW}Warning: No log files found matching timestamp $TEST_TIMESTAMP${NC}"
        echo -e "${YELLOW}This may indicate issues with volume mounting or permissions${NC}"
        echo -e "${YELLOW}Contents of log directory:${NC}"
        ls -la "$log_dir" || echo "Cannot list directory contents"
    fi

    echo -e "${BLUE}Test completed. Check logs in $log_dir directory${NC}"
}

# Full test with bootstrap script
run_full_test() {
    local distro=$1
    local tag_name=$(echo $distro | sed 's/:/-/g') # Replace colon with hyphen for tag
    echo -e "\n${BLUE}=== Running full bootstrap test in $distro container ===${NC}"

    # Create a Dockerfile for full test
    cat > Dockerfile.fulltest <<EOF
FROM $distro
LABEL description="$distro full test environment for bootstrap.sh script"

# Install minimal dependencies needed to run the bootstrap
ENV TZ=America/New_York
ENV DEBIAN_FRONTEND=noninteractive
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone \
    && apt-get update && apt-get install -y curl git sudo wget

# Create a test user with sudo permissions
RUN useradd -ms /bin/bash testuser && \\
    echo "testuser ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/testuser

# Create directory structure for setup files
RUN mkdir -p /home/testuser/shell

# Copy test script for post-bootstrap validation
COPY --chown=testuser:testuser test-setup.sh /home/testuser/

# Copy entire repo structure to ensure we have all needed files
COPY --chown=testuser:testuser . /home/testuser/shell/

USER testuser
WORKDIR /home/testuser

# Make the script executable
RUN chmod +x /home/testuser/test-setup.sh

# Run tests before and after bootstrap to verify package installation
CMD ["/bin/bash", "-c", "echo -e '\\n\\nRunning pre-bootstrap tests...' && ./test-setup.sh && echo -e '\\n\\nRunning bootstrap...' && /home/testuser/shell/bootstrap.sh && echo -e '\\n\\nRunning post-bootstrap tests...' && ./test-setup.sh"]
EOF

    # Build and run the container
    # Create the logs directory if it doesn't exist
    mkdir -p "$(pwd)/logs"
    docker build -f Dockerfile.fulltest -t shell-full-test:$tag_name .
    docker run --rm -it -v "$(pwd)/logs:/logs" shell-full-test:$tag_name

    # Clean up
    rm Dockerfile.fulltest
}
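# Note: because the heredoc above uses an unquoted EOF delimiter, $TZ on the
# "RUN ln -snf ..." line is expanded by the host shell when Dockerfile.fulltest
# is written, not from the ENV TZ defined inside the image; escape it as \$TZ
# if the image-side value is what's intended.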

# Parse command line arguments
case "$1" in
    ubuntu)
        run_ubuntu_test
        ;;
    debian)
        run_debian_test
        ;;
    full-ubuntu)
        run_full_test "ubuntu:24.04"
        ;;
    full-debian)
        run_full_test "debian:12"
        ;;
    all)
        run_ubuntu_test
        run_debian_test
        ;;
    *)
        echo -e "${BLUE}Shell Setup Test Runner${NC}"
        echo -e "Usage: $0 [option]"
        echo -e "\nOptions:"
        echo "  ubuntu       Run test on Ubuntu container (tests packages and components)"
        echo "  debian       Run test on Debian container (tests packages and components)"
        echo "  full-ubuntu  Run full bootstrap test on Ubuntu container (performs complete installation)"
        echo "  full-debian  Run full bootstrap test on Debian container (performs complete installation)"
        echo "  all          Run tests on both Ubuntu and Debian containers (component tests only)"
        echo -e "\nExamples:"
        echo -e "  $0 ubuntu       # Quick test for package availability"
        echo -e "  $0 full-debian  # Test complete bootstrap installation"
        ;;
esac
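The ubuntu and debian modes above run docker build with --target ubuntu-test and --target debian-test, so they assume a multi-stage Dockerfile at the repository root that defines those stages and writes test logs to /logs (which the script bind-mounts to ./logs on the host). A minimal sketch of what one such stage might look like: the stage name, base image tag, and /logs path come from the script; the installed packages and CMD are assumptions.

# Hypothetical ubuntu-test stage for the Dockerfile this script builds from
FROM ubuntu:24.04 AS ubuntu-test
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y curl git sudo
COPY . /home/testuser/shell/
WORKDIR /home/testuser/shell
# test-setup.sh is expected to read TEST_TIMESTAMP/CONTAINER_TYPE and write its logs to /logs
CMD ["./test-setup.sh"]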
12
ssh-login.sh
@@ -1,15 +1,15 @@
#!/bin/bash
#
# This script is referenced in `/etc/pam.d/sshd`
# This script is referenced in $(/etc/pam.d/sshd)
# - at the end of the `sshd` file, add
# - at the end of the $(sshd) file, add
# `session optional pam_exec.so /home/acedanger/shell/ssh-login.sh`
# $(session optional pam_exec.so /home/acedanger/shell/ssh-login.sh)
#

if [ "${PAM_TYPE}" = "open_session" && "${PAM_RHOST}" -ne "10.0.1.4" ]; then
if [ "${PAM_TYPE}" = "open_session" ] && [ "${PAM_RHOST}" != "10.0.1.4" ]; then
  curl \
    -H prio:urgent \
    -H tags:warning,ssh,login,${HOSTNAME} \
    -H tags:warning,ssh,login,"${HOSTNAME}" \
    -d "SSH login [${HOSTNAME}] user ${PAM_USER} from ${PAM_RHOST}" \
    https://notify.peterwood.rocks/lab
fi
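Because pam_exec hands the PAM items to the script as environment variables (PAM_TYPE, PAM_USER, PAM_RHOST, and so on), the notification path can be smoke-tested without opening a real SSH session. A minimal sketch, using placeholder values for the user and remote host:

PAM_TYPE=open_session PAM_USER=testuser PAM_RHOST=203.0.113.10 \
    bash /home/acedanger/shell/ssh-login.sh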