Mirror of https://github.com/acedanger/shell.git (synced 2025-12-05 22:50:18 -08:00)
feat: Add base HTML template and implement dashboard, logs, and service views
- Created a base HTML template for consistent layout across pages.
- Developed a dashboard page to display backup service metrics and statuses.
- Implemented a log viewer for detailed log file inspection.
- Added an error handling page for a better user experience during failures.
- Introduced a service detail page to show specific service metrics and actions.
- Enhanced log filtering and viewing capabilities.
- Integrated auto-refresh functionality for real-time updates on metrics.
- Created integration and unit test scripts for backup metrics functionality.
This commit is contained in:

generate-backup-metrics.sh (new executable file, 610 lines)
@@ -0,0 +1,610 @@
#!/bin/bash

################################################################################
# Backup Metrics JSON Generator
################################################################################
#
# Author: Peter Wood <peter@peterwood.dev>
# Description: Generates comprehensive JSON metrics for all backup services
#              to support the web application monitoring and management interface.
#
# Features:
#   - Scans backup directory structure automatically
#   - Extracts metadata from backup files (size, timestamps, checksums)
#   - Generates standardized JSON metrics per service
#   - Handles scheduled backup subdirectories
#   - Includes performance metrics from log files
#   - Creates consolidated metrics index
#
# Output Structure:
#   /mnt/share/media/backups/metrics/
#   ├── index.json              # Service directory index
#   ├── {service_name}/
#   │   ├── metrics.json        # Service backup metrics
#   │   └── history.json        # Historical backup data
#   └── consolidated.json       # All services summary
#
# Usage:
#   ./generate-backup-metrics.sh          # Generate all metrics
#   ./generate-backup-metrics.sh plex     # Generate metrics for a specific service
#   ./generate-backup-metrics.sh --watch  # Monitor mode with auto-refresh
#
################################################################################
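
# Illustrative shape of the generated index.json (field values are placeholders;
# "plex" is the example service from the usage notes above):
#
#   {
#     "services": [
#       { "name": "plex",
#         "metrics_path": "/metrics/plex/metrics.json",
#         "history_path": "/metrics/plex/history.json" } ],
#     "generated_at": "2025-01-01T00:00:00+00:00"
#   }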

set -e

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'

# Configuration
BACKUP_ROOT="${BACKUP_ROOT:-/mnt/share/media/backups}"
METRICS_ROOT="${BACKUP_ROOT}/metrics"
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
LOG_FILE="${SCRIPT_DIR}/logs/backup-metrics-$(date +%Y%m%d).log"

# Ensure required directories exist
mkdir -p "${METRICS_ROOT}" "${SCRIPT_DIR}/logs"

# Logging functions
log_message() {
    local message="$1"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${CYAN}[${timestamp}]${NC} ${message}"
    echo "[${timestamp}] $message" >> "$LOG_FILE" 2>/dev/null || true
}

log_error() {
    local message="$1"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${RED}[${timestamp}] ERROR:${NC} ${message}" >&2
    echo "[${timestamp}] ERROR: $message" >> "$LOG_FILE" 2>/dev/null || true
}

log_success() {
    local message="$1"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${GREEN}[${timestamp}] SUCCESS:${NC} ${message}"
    echo "[${timestamp}] SUCCESS: $message" >> "$LOG_FILE" 2>/dev/null || true
}

log_warning() {
    local message="$1"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${YELLOW}[${timestamp}] WARNING:${NC} ${message}"
    echo "[${timestamp}] WARNING: $message" >> "$LOG_FILE" 2>/dev/null || true
}

# Check dependencies
check_dependencies() {
    local missing_deps=()

    for cmd in jq stat find; do
        if ! command -v "$cmd" >/dev/null 2>&1; then
            missing_deps+=("$cmd")
        fi
    done

    if [ ${#missing_deps[@]} -gt 0 ]; then
        log_error "Missing required dependencies: ${missing_deps[*]}"
        log_error "Install with: sudo apt-get install jq coreutils findutils"
        return 1
    fi

    return 0
}

# Get file metadata in JSON format
get_file_metadata() {
    local file_path="$1"

    if [ ! -f "$file_path" ]; then
        echo "{}"
        return 1
    fi

    local size_bytes=$(stat -c%s "$file_path" 2>/dev/null || echo "0")
    local size_mb=$((size_bytes / 1048576))
    local modified_epoch=$(stat -c%Y "$file_path" 2>/dev/null || echo "0")
    local modified_iso=$(date -d "@$modified_epoch" --iso-8601=seconds 2>/dev/null || echo "")
    local checksum=""

    # Calculate checksum for smaller files (< 100MB) to avoid long delays
    if [ "$size_mb" -lt 100 ]; then
        checksum=$(md5sum "$file_path" 2>/dev/null | cut -d' ' -f1 || echo "")
    fi

    jq -n \
        --arg path "$file_path" \
        --arg filename "$(basename "$file_path")" \
        --argjson size_bytes "$size_bytes" \
        --argjson size_mb "$size_mb" \
        --arg size_human "$(numfmt --to=iec-i --suffix=B "$size_bytes" 2>/dev/null || echo "${size_mb}MB")" \
        --argjson modified_epoch "$modified_epoch" \
        --arg modified_iso "$modified_iso" \
        --arg checksum "$checksum" \
        '{
            path: $path,
            filename: $filename,
            size: {
                bytes: $size_bytes,
                mb: $size_mb,
                human: $size_human
            },
            modified: {
                epoch: $modified_epoch,
                iso: $modified_iso
            },
            checksum: $checksum
        }'
}
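
# Example (hypothetical path and values), sketching the JSON printed above:
#   get_file_metadata /mnt/share/media/backups/plex/plex-20240101_030000.tar.gz
# would print something like:
#   { "path": "/mnt/share/media/backups/plex/plex-20240101_030000.tar.gz",
#     "filename": "plex-20240101_030000.tar.gz",
#     "size": { "bytes": 52428800, "mb": 50, "human": "50MiB" },
#     "modified": { "epoch": 1704078000, "iso": "2024-01-01T03:00:00+00:00" },
#     "checksum": "<md5 hex, only computed for files under 100MB>" }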

# Extract timestamp from filename patterns
extract_timestamp_from_filename() {
    local filename="$1"
    local timestamp=""

    # Try various timestamp patterns
    if [[ "$filename" =~ ([0-9]{8}_[0-9]{6}) ]]; then
        # Format: YYYYMMDD_HHMMSS
        local date_part="${BASH_REMATCH[1]}"
        timestamp=$(date -d "${date_part:0:8} ${date_part:9:2}:${date_part:11:2}:${date_part:13:2}" --iso-8601=seconds 2>/dev/null || echo "")
    elif [[ "$filename" =~ ([0-9]{8}-[0-9]{6}) ]]; then
        # Format: YYYYMMDD-HHMMSS
        local date_part="${BASH_REMATCH[1]}"
        timestamp=$(date -d "${date_part:0:8} ${date_part:9:2}:${date_part:11:2}:${date_part:13:2}" --iso-8601=seconds 2>/dev/null || echo "")
    elif [[ "$filename" =~ ([0-9]{4}-[0-9]{2}-[0-9]{2}) ]]; then
        # Format: YYYY-MM-DD (assume midnight)
        timestamp=$(date -d "${BASH_REMATCH[1]}" --iso-8601=seconds 2>/dev/null || echo "")
    fi

    echo "$timestamp"
}
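
# Example (hypothetical filename): "plex-backup-20240115_023000.tar.gz" matches the
# first pattern and yields "2024-01-15T02:30:00" plus the host's UTC offset; a name
# with no recognizable date yields an empty string.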

# Parse performance logs for runtime metrics
parse_performance_logs() {
    local service_name="$1"
    local service_dir="$2"
    local performance_data="{}"

    # Look for performance logs in various locations
    local log_patterns=(
        "${service_dir}/logs/*.json"
        "${BACKUP_ROOT}/logs/*${service_name}*.json"
        "${SCRIPT_DIR}/logs/*${service_name}*.json"
    )

    for pattern in "${log_patterns[@]}"; do
        for log_file in ${pattern}; do
            if [ -f "$log_file" ]; then
                log_message "Found performance log: $log_file"

                # Try to parse JSON performance data
                if jq empty "$log_file" 2>/dev/null; then
                    local log_data=$(cat "$log_file")
                    performance_data=$(echo "$performance_data" | jq --argjson new_data "$log_data" '. + $new_data')
                fi
            fi
        done
    done

    echo "$performance_data"
}
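
# Note: performance logs are merged as-is, so their schema is whatever the backup
# jobs happen to write. A hypothetical log such as
#   {"duration_seconds": 120, "exit_code": 0}
# would surface unchanged under the service's performance_metrics field.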

# Get backup metrics for a service
get_service_metrics() {
    local service_name="$1"
    local service_dir="${BACKUP_ROOT}/${service_name}"

    if [ ! -d "$service_dir" ]; then
        log_warning "Service directory not found: $service_dir"
        return 1
    fi

    log_message "Processing service: $service_name"

    local backup_files=()
    local scheduled_files=()
    local total_size_bytes=0
    local latest_backup=""
    local latest_timestamp=0

    # Find backup files in main directory
    while IFS= read -r -d '' file; do
        if [ -f "$file" ]; then
            backup_files+=("$file")
            local file_size=$(stat -c%s "$file" 2>/dev/null || echo "0")
            total_size_bytes=$((total_size_bytes + file_size))

            # Check if this is the latest backup
            local file_timestamp=$(stat -c%Y "$file" 2>/dev/null || echo "0")
            if [ "$file_timestamp" -gt "$latest_timestamp" ]; then
                latest_timestamp="$file_timestamp"
                latest_backup="$file"
            fi
        fi
    done < <(find "$service_dir" -maxdepth 1 -type f \( -name "*.tar.gz" -o -name "*.zip" -o -name "*.sql" -o -name "*.sql.gz" -o -name "*.db" \) -print0 2>/dev/null || true)

    # Find backup files in scheduled subdirectory
    local scheduled_dir="${service_dir}/scheduled"
    if [ -d "$scheduled_dir" ]; then
        while IFS= read -r -d '' file; do
            if [ -f "$file" ]; then
                scheduled_files+=("$file")
                local file_size=$(stat -c%s "$file" 2>/dev/null || echo "0")
                total_size_bytes=$((total_size_bytes + file_size))

                # Check if this is the latest backup
                local file_timestamp=$(stat -c%Y "$file" 2>/dev/null || echo "0")
                if [ "$file_timestamp" -gt "$latest_timestamp" ]; then
                    latest_timestamp="$file_timestamp"
                    latest_backup="$file"
                fi
            fi
        done < <(find "$scheduled_dir" -type f \( -name "*.tar.gz" -o -name "*.zip" -o -name "*.sql" -o -name "*.sql.gz" -o -name "*.db" \) -print0 2>/dev/null || true)
    fi

    # Calculate metrics
    local total_files=$((${#backup_files[@]} + ${#scheduled_files[@]}))
    local total_size_mb=$((total_size_bytes / 1048576))
    local total_size_human=$(numfmt --to=iec-i --suffix=B "$total_size_bytes" 2>/dev/null || echo "${total_size_mb}MB")

    # Get latest backup metadata
    local latest_backup_metadata="{}"
    if [ -n "$latest_backup" ]; then
        latest_backup_metadata=$(get_file_metadata "$latest_backup")
    fi

    # Parse performance logs
    local performance_metrics
    performance_metrics=$(parse_performance_logs "$service_name" "$service_dir")

    # Generate service metrics JSON
    local service_metrics
    service_metrics=$(jq -n \
        --arg service_name "$service_name" \
        --arg backup_path "$service_dir" \
        --arg scheduled_path "$scheduled_dir" \
        --argjson total_files "$total_files" \
        --argjson main_files "${#backup_files[@]}" \
        --argjson scheduled_files "${#scheduled_files[@]}" \
        --argjson total_size_bytes "$total_size_bytes" \
        --argjson total_size_mb "$total_size_mb" \
        --arg total_size_human "$total_size_human" \
        --argjson latest_backup "$latest_backup_metadata" \
        --argjson performance "$performance_metrics" \
        --arg generated_at "$(date --iso-8601=seconds)" \
        --argjson generated_epoch "$(date +%s)" \
        '{
            service_name: $service_name,
            backup_path: $backup_path,
            scheduled_path: $scheduled_path,
            summary: {
                total_files: $total_files,
                main_directory_files: $main_files,
                scheduled_directory_files: $scheduled_files,
                total_size: {
                    bytes: $total_size_bytes,
                    mb: $total_size_mb,
                    human: $total_size_human
                }
            },
            latest_backup: $latest_backup,
            performance_metrics: $performance,
            metadata: {
                generated_at: $generated_at,
                generated_epoch: $generated_epoch
            }
        }')

    # Create service metrics directory
    local service_metrics_dir="${METRICS_ROOT}/${service_name}"
    mkdir -p "$service_metrics_dir"

    # Write service metrics
    echo "$service_metrics" | jq '.' > "${service_metrics_dir}/metrics.json"
    log_success "Generated metrics for $service_name (${total_files} files, ${total_size_human})"

    # Generate detailed file history
    generate_service_history "$service_name" "$service_dir" "$service_metrics_dir"

    echo "$service_metrics"
}

# Generate detailed backup history for a service
generate_service_history() {
    local service_name="$1"
    local service_dir="$2"
    local output_dir="$3"

    local history_array="[]"
    local file_count=0

    # Process all backup files
    local search_dirs=("$service_dir")
    if [ -d "${service_dir}/scheduled" ]; then
        search_dirs+=("${service_dir}/scheduled")
    fi

    for search_dir in "${search_dirs[@]}"; do
        if [ ! -d "$search_dir" ]; then
            continue
        fi

        while IFS= read -r -d '' file; do
            if [ -f "$file" ]; then
                local file_metadata
                file_metadata=$(get_file_metadata "$file")

                # Add extracted timestamp
                local filename_timestamp
                filename_timestamp=$(extract_timestamp_from_filename "$(basename "$file")")

                file_metadata=$(echo "$file_metadata" | jq --arg ts "$filename_timestamp" '. + {filename_timestamp: $ts}')

                # Determine if file is in scheduled directory
                local is_scheduled=false
                if [[ "$file" == *"/scheduled/"* ]]; then
                    is_scheduled=true
                fi

                file_metadata=$(echo "$file_metadata" | jq --argjson scheduled "$is_scheduled" '. + {is_scheduled: $scheduled}')

                history_array=$(echo "$history_array" | jq --argjson item "$file_metadata" '. + [$item]')
                file_count=$((file_count + 1))
            fi
        done < <(find "$search_dir" -type f \( -name "*.tar.gz" -o -name "*.zip" -o -name "*.sql" -o -name "*.sql.gz" -o -name "*.db" \) -print0 2>/dev/null || true)
    done

    # Sort by modification time (newest first)
    history_array=$(echo "$history_array" | jq 'sort_by(.modified.epoch) | reverse')

    # Create history JSON
    local history_json
    history_json=$(jq -n \
        --arg service_name "$service_name" \
        --argjson total_files "$file_count" \
        --argjson files "$history_array" \
        --arg generated_at "$(date --iso-8601=seconds)" \
        '{
            service_name: $service_name,
            total_files: $total_files,
            files: $files,
            generated_at: $generated_at
        }')

    echo "$history_json" | jq '.' > "${output_dir}/history.json"
    log_message "Generated history for $service_name ($file_count files)"
}
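
# Each entry in history.json's "files" array is the get_file_metadata object
# augmented with the two fields added above, e.g. (hypothetical values):
#   { ..., "filename_timestamp": "2024-01-15T02:30:00+00:00", "is_scheduled": true }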

# Discover all backup services
discover_services() {
    local services=()

    if [ ! -d "$BACKUP_ROOT" ]; then
        log_error "Backup root directory not found: $BACKUP_ROOT"
        return 1
    fi

    # Find all subdirectories that contain backup files
    while IFS= read -r -d '' dir; do
        local service_name=$(basename "$dir")

        # Skip metrics directory
        if [ "$service_name" = "metrics" ]; then
            continue
        fi

        # Check if directory contains backup files
        local has_backups=false

        # Check main directory
        if find "$dir" -maxdepth 1 -type f \( -name "*.tar.gz" -o -name "*.zip" -o -name "*.sql" -o -name "*.sql.gz" -o -name "*.db" \) -print -quit 2>/dev/null | grep -q .; then
            has_backups=true
        fi

        # Check scheduled subdirectory
        if [ -d "${dir}/scheduled" ] && find "${dir}/scheduled" -type f \( -name "*.tar.gz" -o -name "*.zip" -o -name "*.sql" -o -name "*.sql.gz" -o -name "*.db" \) -print -quit 2>/dev/null | grep -q .; then
            has_backups=true
        fi

        if [ "$has_backups" = true ]; then
            services+=("$service_name")
        fi
    done < <(find "$BACKUP_ROOT" -mindepth 1 -maxdepth 1 -type d -print0 2>/dev/null || true)

    # Emit one service name per line; guard the empty case so callers using
    # readarray do not end up with a single blank entry.
    if [ ${#services[@]} -gt 0 ]; then
        printf '%s\n' "${services[@]}"
    fi
}
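
# Example (hypothetical layout): with /mnt/share/media/backups/{plex,gitea,metrics}
# on disk and at least one archive in plex/ and gitea/, discover_services prints
# "plex" and "gitea"; the metrics/ directory itself is always skipped.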

# Generate consolidated metrics index
generate_consolidated_metrics() {
    local services=("$@")
    local consolidated_data="[]"
    local total_services=${#services[@]}
    local total_size_bytes=0
    local total_files=0

    for service in "${services[@]}"; do
        local service_metrics_file="${METRICS_ROOT}/${service}/metrics.json"

        if [ -f "$service_metrics_file" ]; then
            local service_data=$(cat "$service_metrics_file")
            consolidated_data=$(echo "$consolidated_data" | jq --argjson service "$service_data" '. + [$service]')

            # Add to totals
            local service_size=$(echo "$service_data" | jq -r '.summary.total_size.bytes // 0')
            local service_files=$(echo "$service_data" | jq -r '.summary.total_files // 0')
            total_size_bytes=$((total_size_bytes + service_size))
            total_files=$((total_files + service_files))
        fi
    done

    # Generate consolidated summary
    local total_size_mb=$((total_size_bytes / 1048576))
    local total_size_human=$(numfmt --to=iec-i --suffix=B "$total_size_bytes" 2>/dev/null || echo "${total_size_mb}MB")

    local consolidated_json
    consolidated_json=$(jq -n \
        --argjson services "$consolidated_data" \
        --argjson total_services "$total_services" \
        --argjson total_files "$total_files" \
        --argjson total_size_bytes "$total_size_bytes" \
        --argjson total_size_mb "$total_size_mb" \
        --arg total_size_human "$total_size_human" \
        --arg generated_at "$(date --iso-8601=seconds)" \
        '{
            summary: {
                total_services: $total_services,
                total_files: $total_files,
                total_size: {
                    bytes: $total_size_bytes,
                    mb: $total_size_mb,
                    human: $total_size_human
                }
            },
            services: $services,
            generated_at: $generated_at
        }')

    echo "$consolidated_json" | jq '.' > "${METRICS_ROOT}/consolidated.json"
    log_success "Generated consolidated metrics ($total_services services, $total_files files, $total_size_human)"
}
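
# A dashboard or shell one-liner could summarize the consolidated file with a
# jq query along these lines (sketch; the path assumes the default BACKUP_ROOT):
#   jq -r '.summary | "\(.total_services) services, \(.total_files) files, \(.total_size.human)"' \
#       /mnt/share/media/backups/metrics/consolidated.json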

# Generate service index
generate_service_index() {
    local services=("$@")
    local index_array="[]"

    for service in "${services[@]}"; do
        local service_info
        service_info=$(jq -n \
            --arg name "$service" \
            --arg metrics_path "/metrics/${service}/metrics.json" \
            --arg history_path "/metrics/${service}/history.json" \
            '{
                name: $name,
                metrics_path: $metrics_path,
                history_path: $history_path
            }')

        index_array=$(echo "$index_array" | jq --argjson service "$service_info" '. + [$service]')
    done

    local index_json
    index_json=$(jq -n \
        --argjson services "$index_array" \
        --arg generated_at "$(date --iso-8601=seconds)" \
        '{
            services: $services,
            generated_at: $generated_at
        }')

    echo "$index_json" | jq '.' > "${METRICS_ROOT}/index.json"
    log_success "Generated service index (${#services[@]} services)"
}

# Watch mode for continuous updates
watch_mode() {
    log_message "Starting watch mode - generating metrics every 60 seconds"
    log_message "Press Ctrl+C to stop"

    while true; do
        log_message "Generating metrics..."
        main_generate_metrics ""
        log_message "Next update in 60 seconds..."
        sleep 60
    done
}

# Main metrics generation function
main_generate_metrics() {
    local target_service="$1"

    log_message "Starting backup metrics generation"

    # Check dependencies
    if ! check_dependencies; then
        return 1
    fi

    # Discover services
    log_message "Discovering backup services..."
    local services
    readarray -t services < <(discover_services)

    if [ ${#services[@]} -eq 0 ]; then
        log_warning "No backup services found in $BACKUP_ROOT"
        return 0
    fi

    log_message "Found ${#services[@]} backup services: ${services[*]}"

    # Generate metrics for specific service or all services
    if [ -n "$target_service" ]; then
        if [[ " ${services[*]} " =~ " $target_service " ]]; then
            get_service_metrics "$target_service"
        else
            log_error "Service not found: $target_service"
            log_message "Available services: ${services[*]}"
            return 1
        fi
    else
        # Generate metrics for all services
        for service in "${services[@]}"; do
            get_service_metrics "$service"
        done

        # Generate consolidated metrics and index
        generate_consolidated_metrics "${services[@]}"
        generate_service_index "${services[@]}"
    fi

    log_success "Metrics generation completed"
    log_message "Metrics location: $METRICS_ROOT"
}

# Help function
show_help() {
    echo -e "${BLUE}Backup Metrics JSON Generator${NC}"
    echo ""
    echo "Usage: $0 [options] [service_name]"
    echo ""
    echo "Options:"
    echo "  -h, --help    Show this help message"
    echo "  --watch       Monitor mode with auto-refresh every 60 seconds"
    echo ""
    echo "Examples:"
    echo "  $0            # Generate metrics for all services"
    echo "  $0 plex       # Generate metrics for Plex service only"
    echo "  $0 --watch    # Monitor mode with auto-refresh"
    echo ""
    echo "Output:"
    echo "  Metrics are generated in: $METRICS_ROOT"
    echo "  - index.json: Service directory"
    echo "  - consolidated.json: All services summary"
    echo "  - {service}/metrics.json: Individual service metrics"
    echo "  - {service}/history.json: Individual service file history"
}

# Main script logic
main() {
    case "${1:-}" in
        -h|--help)
            show_help
            exit 0
            ;;
        --watch)
            watch_mode
            ;;
        *)
            main_generate_metrics "$1"
            ;;
    esac
}

# Run main function
main "$@"
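
# Quick sanity check after a run (sketch): list every JSON file the script produced.
#   find "${BACKUP_ROOT:-/mnt/share/media/backups}/metrics" -name '*.json' | sort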