mirror of
https://github.com/acedanger/shell.git
synced 2025-12-06 05:40:11 -08:00
feat: Add base HTML template and implement dashboard, logs, and service views
- Created a base HTML template for consistent layout across pages. - Developed a dashboard page to display backup service metrics and statuses. - Implemented a log viewer for detailed log file inspection. - Added error handling page for better user experience during failures. - Introduced service detail page to show specific service metrics and actions. - Enhanced log filtering and viewing capabilities. - Integrated auto-refresh functionality for real-time updates on metrics. - Created integration and unit test scripts for backup metrics functionality.
This commit is contained in:
489
lib/backup-json-logger.sh.deprecated
Normal file
489
lib/backup-json-logger.sh.deprecated
Normal file
@@ -0,0 +1,489 @@
|
||||
#!/bin/bash
|
||||
|
||||
################################################################################
|
||||
# Backup JSON Logger Library
|
||||
################################################################################
|
||||
#
|
||||
# Author: Peter Wood <peter@peterwood.dev>
|
||||
# Description: Reusable JSON logging system for backup scripts to generate
|
||||
# real-time metrics and status updates during backup operations.
|
||||
#
|
||||
# Features:
|
||||
# - Real-time JSON metrics generation during backup operations
|
||||
# - Standardized JSON structure across all backup services
|
||||
# - Runtime metrics tracking (start time, duration, status, etc.)
|
||||
# - Progress tracking with file-by-file updates
|
||||
# - Error handling and recovery state tracking
|
||||
# - Web application compatible JSON format
|
||||
#
|
||||
# Usage:
|
||||
# source /home/acedanger/shell/lib/backup-json-logger.sh
|
||||
#
|
||||
# # Initialize backup session
|
||||
# json_backup_init "plex" "/mnt/share/media/backups/plex"
|
||||
#
|
||||
# # Update status during backup
|
||||
# json_backup_start
|
||||
# json_backup_add_file "/path/to/file" "success" "1024" "abc123"
|
||||
# json_backup_complete "success"
|
||||
#
|
||||
################################################################################
|
||||
|
||||
# Global configuration
|
||||
JSON_METRICS_ROOT="${BACKUP_ROOT:-/mnt/share/media/backups}/metrics"
|
||||
JSON_LOGGER_DEBUG="${JSON_LOGGER_DEBUG:-false}"
|
||||
|
||||
# JSON logger internal variables
|
||||
declare -g JSON_BACKUP_SERVICE=""
|
||||
declare -g JSON_BACKUP_PATH=""
|
||||
declare -g JSON_BACKUP_SESSION_ID=""
|
||||
declare -g JSON_BACKUP_START_TIME=""
|
||||
declare -g JSON_BACKUP_LOG_FILE=""
|
||||
declare -g JSON_BACKUP_METRICS_FILE=""
|
||||
declare -g JSON_BACKUP_TEMP_DIR=""
|
||||
|
||||
# Logging function for debug messages
|
||||
# Emit a debug line to stderr, but only when JSON_LOGGER_DEBUG=true.
json_log_debug() {
    [ "$JSON_LOGGER_DEBUG" != "true" ] && return 0
    echo "[JSON-LOGGER] $1" >&2
}
|
||||
|
||||
# Initialize JSON logging for a backup session
|
||||
# Initialize JSON logging for a backup session.
#
# Arguments:
#   $1 - service name (required)
#   $2 - backup destination path (required)
#   $3 - optional session id (defaults to a timestamp)
# Globals written: JSON_BACKUP_SERVICE, JSON_BACKUP_PATH,
#   JSON_BACKUP_SESSION_ID, JSON_BACKUP_START_TIME, JSON_BACKUP_TEMP_DIR,
#   JSON_BACKUP_LOG_FILE, JSON_BACKUP_METRICS_FILE
# Returns 0 on success, 1 when required arguments are missing.
json_backup_init() {
    local svc="$1"
    local dest="$2"
    local session_override="$3"

    if [ -z "$svc" ] || [ -z "$dest" ]; then
        echo "Error: json_backup_init requires service_name and backup_path" >&2
        return 1
    fi

    # Session identity and timing.
    JSON_BACKUP_SERVICE="$svc"
    JSON_BACKUP_PATH="$dest"
    JSON_BACKUP_SESSION_ID="${session_override:-$(date +%Y%m%d_%H%M%S)}"
    JSON_BACKUP_START_TIME=$(date +%s)

    # Per-service metrics directory plus a scratch dir for this session.
    local svc_dir="$JSON_METRICS_ROOT/$svc"
    JSON_BACKUP_TEMP_DIR="$svc_dir/.tmp_${JSON_BACKUP_SESSION_ID}"
    mkdir -p "$svc_dir" "$JSON_BACKUP_TEMP_DIR"

    JSON_BACKUP_LOG_FILE="$JSON_BACKUP_TEMP_DIR/backup_session.json"
    JSON_BACKUP_METRICS_FILE="$svc_dir/metrics.json"

    json_log_debug "Initialized JSON logging for $svc (session: $JSON_BACKUP_SESSION_ID)"

    json_create_initial_session
    return 0
}
|
||||
|
||||
# Create initial backup session JSON structure
|
||||
# Write the initial session document to JSON_BACKUP_LOG_FILE: identity,
# start time, empty files list, zeroed summary, and null performance phases.
# Reads the JSON_BACKUP_* globals set by json_backup_init.
json_create_initial_session() {
    local initial_doc
    initial_doc=$(jq -n \
        --arg service "$JSON_BACKUP_SERVICE" \
        --arg session_id "$JSON_BACKUP_SESSION_ID" \
        --arg backup_path "$JSON_BACKUP_PATH" \
        --argjson start_time "$JSON_BACKUP_START_TIME" \
        --arg start_iso "$(date -d "@$JSON_BACKUP_START_TIME" --iso-8601=seconds)" \
        --arg status "initialized" \
        --arg hostname "$(hostname)" \
        '{
            service_name: $service,
            session_id: $session_id,
            backup_path: $backup_path,
            hostname: $hostname,
            status: $status,
            start_time: {
                epoch: $start_time,
                iso: $start_iso
            },
            end_time: null,
            duration_seconds: null,
            files: [],
            summary: {
                total_files: 0,
                successful_files: 0,
                failed_files: 0,
                total_size_bytes: 0,
                errors: []
            },
            performance: {
                backup_phase_duration: null,
                verification_phase_duration: null,
                compression_phase_duration: null,
                cleanup_phase_duration: null
            },
            metadata: {
                script_version: "1.0",
                json_logger_version: "1.0",
                last_updated: $start_iso
            }
        }')

    printf '%s\n' "$initial_doc" > "$JSON_BACKUP_LOG_FILE"
    json_log_debug "Created initial session file: $JSON_BACKUP_LOG_FILE"
}
|
||||
|
||||
# Update backup status
|
||||
# Update the session's status (optionally appending an error to
# .summary.errors) and refresh the aggregated metrics file.
#
# Arguments:
#   $1 - new status string (e.g. "running", "failed")
#   $2 - optional error message
# Returns 0 on success, 1 if there is no session file or jq fails.
#
# Fix: the original captured jq output in a variable and unconditionally
# redirected it back into the session file, so a failing jq run (e.g. on a
# corrupted file) truncated the session to empty. We now write to a temp
# file and only rename over the original on success.
json_backup_update_status() {
    local new_status="$1"
    local error_message="$2"

    if [ ! -f "$JSON_BACKUP_LOG_FILE" ]; then
        json_log_debug "Warning: Session file not found, cannot update status"
        return 1
    fi

    local current_iso
    current_iso=$(date --iso-8601=seconds)

    local tmp_file="${JSON_BACKUP_LOG_FILE}.tmp"
    local jq_rv=0
    if [ -n "$error_message" ]; then
        jq \
            --arg status "$new_status" \
            --arg error "$error_message" \
            --arg updated "$current_iso" \
            '.status = $status | .summary.errors += [$error] | .metadata.last_updated = $updated' \
            "$JSON_BACKUP_LOG_FILE" > "$tmp_file" || jq_rv=$?
    else
        jq \
            --arg status "$new_status" \
            --arg updated "$current_iso" \
            '.status = $status | .metadata.last_updated = $updated' \
            "$JSON_BACKUP_LOG_FILE" > "$tmp_file" || jq_rv=$?
    fi

    if [ "$jq_rv" -ne 0 ]; then
        rm -f "$tmp_file"
        json_log_debug "Warning: jq failed, session file left untouched"
        return 1
    fi
    mv "$tmp_file" "$JSON_BACKUP_LOG_FILE"

    json_log_debug "Updated status to: $new_status"

    # Keep the web-facing metrics file in sync.
    json_update_main_metrics
}
|
||||
|
||||
# Mark backup as started
|
||||
# Transition the current session into the "running" state.
json_backup_start() {
    json_backup_update_status "running"
}
|
||||
|
||||
# Add a file to the backup session
|
||||
# Record a processed file in the session and update summary counters.
#
# Arguments:
#   $1 - file path
#   $2 - status: "success", "failed" or "skipped"
#   $3 - size in bytes (optional, defaults to 0)
#   $4 - checksum (optional)
#   $5 - error message (optional)
# Returns 0 on success, 1 if there is no session file or jq fails.
#
# Fixes: (1) size_human used a no-op `tostring | tonumber` round-trip and
# emitted unrounded floats like "0.0009765625MB"; now rounded to 2 decimals.
# (2) jq output was blindly redirected back over the session file, so a jq
# failure truncated it; now written atomically via a temp file.
json_backup_add_file() {
    local file_path="$1"
    local status="$2"
    local size_bytes="$3"
    local checksum="$4"
    local error_message="$5"

    if [ ! -f "$JSON_BACKUP_LOG_FILE" ]; then
        json_log_debug "Warning: Session file not found, cannot add file"
        return 1
    fi

    # Best-effort file metadata; missing files record epoch 0 / empty iso.
    local filename
    filename=$(basename "$file_path")
    local modified_time=""
    local modified_iso=""
    if [ -f "$file_path" ]; then
        modified_time=$(stat -c%Y "$file_path" 2>/dev/null || echo "0")
        modified_iso=$(date -d "@$modified_time" --iso-8601=seconds 2>/dev/null || echo "")
    fi

    local file_entry
    file_entry=$(jq -n \
        --arg path "$file_path" \
        --arg filename "$filename" \
        --arg status "$status" \
        --argjson size_bytes "${size_bytes:-0}" \
        --arg checksum "${checksum:-}" \
        --argjson modified_time "${modified_time:-0}" \
        --arg modified_iso "$modified_iso" \
        --arg processed_at "$(date --iso-8601=seconds)" \
        --arg error_message "${error_message:-}" \
        '{
            path: $path,
            filename: $filename,
            status: $status,
            size_bytes: $size_bytes,
            size_human: (if $size_bytes > 0 then ((($size_bytes / 1048576) * 100 | round) / 100 | tostring) + "MB" else "0B" end),
            checksum: $checksum,
            modified_time: {
                epoch: $modified_time,
                iso: $modified_iso
            },
            processed_at: $processed_at,
            error_message: (if $error_message != "" then $error_message else null end)
        }') || {
        json_log_debug "Warning: failed to build file entry for $filename"
        return 1
    }

    # Atomic update: never clobber the session file if jq fails.
    local tmp_file="${JSON_BACKUP_LOG_FILE}.tmp"
    if jq \
        --argjson file_entry "$file_entry" \
        --arg current_time "$(date --iso-8601=seconds)" \
        '
        .files += [$file_entry] |
        .summary.total_files += 1 |
        (if $file_entry.status == "success" then .summary.successful_files += 1 else . end) |
        (if $file_entry.status == "failed" then .summary.failed_files += 1 else . end) |
        .summary.total_size_bytes += $file_entry.size_bytes |
        .metadata.last_updated = $current_time
        ' \
        "$JSON_BACKUP_LOG_FILE" > "$tmp_file"; then
        mv "$tmp_file" "$JSON_BACKUP_LOG_FILE"
    else
        rm -f "$tmp_file"
        json_log_debug "Warning: jq failed, file entry not recorded"
        return 1
    fi

    json_log_debug "Added file: $filename ($status)"

    # Keep the web-facing metrics file in sync.
    json_update_main_metrics
}
|
||||
|
||||
# Record performance phase timing
|
||||
# Record the duration of a named backup phase in .performance.
#
# Arguments:
#   $1 - phase name: "backup", "verification", "compression", "cleanup"
#   $2 - duration in seconds (must be a number)
# Returns 0 on success, 1 if there is no session file or jq fails.
#
# Fix: jq output is now written to a temp file and renamed on success, so a
# failing jq run (e.g. non-numeric duration) no longer truncates the
# session file to empty.
json_backup_record_phase() {
    local phase_name="$1"
    local duration_seconds="$2"

    if [ ! -f "$JSON_BACKUP_LOG_FILE" ]; then
        json_log_debug "Warning: Session file not found, cannot record phase"
        return 1
    fi

    local phase_field="${phase_name}_phase_duration"
    local tmp_file="${JSON_BACKUP_LOG_FILE}.tmp"

    if jq \
        --arg phase "$phase_field" \
        --argjson duration "$duration_seconds" \
        --arg updated "$(date --iso-8601=seconds)" \
        '.performance[$phase] = $duration | .metadata.last_updated = $updated' \
        "$JSON_BACKUP_LOG_FILE" > "$tmp_file"; then
        mv "$tmp_file" "$JSON_BACKUP_LOG_FILE"
    else
        rm -f "$tmp_file"
        json_log_debug "Warning: jq failed, phase not recorded"
        return 1
    fi

    json_log_debug "Recorded $phase_name phase: ${duration_seconds}s"
}
|
||||
|
||||
# Complete the backup session
|
||||
# Finalize the backup session: set the terminal status, end time and
# duration, then refresh metrics, archive to history and clean up the
# session scratch directory.
#
# Arguments:
#   $1 - final status: "success", "failed", "partial"
#   $2 - optional completion message (stored as .completion_message)
# Returns 0 on success, 1 if there is no session file or jq fails.
#
# Fixes: the original duplicated the entire jq invocation for the
# with/without-message cases; merged into one call with a conditional
# assignment. Output is also written atomically so a jq failure cannot
# truncate the session file.
json_backup_complete() {
    local final_status="$1"
    local final_message="$2"

    if [ ! -f "$JSON_BACKUP_LOG_FILE" ]; then
        json_log_debug "Warning: Session file not found, cannot complete"
        return 1
    fi

    local end_time end_iso duration
    end_time=$(date +%s)
    end_iso=$(date --iso-8601=seconds)
    duration=$((end_time - JSON_BACKUP_START_TIME))

    local tmp_file="${JSON_BACKUP_LOG_FILE}.tmp"
    if jq \
        --arg status "$final_status" \
        --argjson end_time "$end_time" \
        --arg end_iso "$end_iso" \
        --argjson duration "$duration" \
        --arg message "$final_message" \
        '
        .status = $status |
        .end_time = {epoch: $end_time, iso: $end_iso} |
        .duration_seconds = $duration |
        (if $message != "" then .completion_message = $message else . end) |
        .metadata.last_updated = $end_iso
        ' \
        "$JSON_BACKUP_LOG_FILE" > "$tmp_file"; then
        mv "$tmp_file" "$JSON_BACKUP_LOG_FILE"
    else
        rm -f "$tmp_file"
        json_log_debug "Warning: jq failed, session not finalized"
        return 1
    fi

    json_log_debug "Completed backup session: $final_status (${duration}s)"

    # Final update to main metrics, then archive and clean up.
    json_update_main_metrics
    json_archive_session
    json_cleanup_session
}
|
||||
|
||||
# Update the main metrics.json file
|
||||
# Regenerate the service's main metrics.json from the current session:
# a compact current_session summary plus the most recent successful file.
# Returns 0 on success, 1 if there is no session file or jq fails.
#
# Fixes: removed the useless `cat` + `echo | jq` round-trips (jq reads the
# file directly), and the metrics file is only overwritten when jq
# succeeds — previously a jq failure wrote an empty metrics file.
json_update_main_metrics() {
    if [ ! -f "$JSON_BACKUP_LOG_FILE" ]; then
        return 1
    fi

    # Most recent successful file, or {} when none.
    local latest_backup
    latest_backup=$(jq '
        .files |
        map(select(.status == "success")) |
        sort_by(.processed_at) |
        last // {}
        ' "$JSON_BACKUP_LOG_FILE") || return 1

    local current_metrics
    current_metrics=$(jq \
        --argjson latest_backup "$latest_backup" \
        '{
            service_name: .service_name,
            backup_path: .backup_path,
            current_session: {
                session_id: .session_id,
                status: .status,
                start_time: .start_time,
                end_time: .end_time,
                duration_seconds: .duration_seconds,
                files_processed: .summary.total_files,
                files_successful: .summary.successful_files,
                files_failed: .summary.failed_files,
                total_size_bytes: .summary.total_size_bytes,
                total_size_human: (if .summary.total_size_bytes > 0 then (.summary.total_size_bytes / 1048576 | tostring + "MB") else "0B" end),
                errors: .summary.errors,
                performance: .performance
            },
            latest_backup: $latest_backup,
            generated_at: .metadata.last_updated
        }' "$JSON_BACKUP_LOG_FILE") || return 1

    printf '%s\n' "$current_metrics" > "$JSON_BACKUP_METRICS_FILE"
    json_log_debug "Updated main metrics file"
}
|
||||
|
||||
# Archive completed session to history
|
||||
# Append the completed session to the service's history.json (newest
# first). Returns 0 on success, 1 if there is no session file or jq fails.
#
# Fixes: the history file was seeded by splicing $JSON_BACKUP_SERVICE,
# unquoted, into a JSON string — invalid JSON for names containing quotes
# and word-split by the shell; now built with jq -n --arg. The history
# update is also written atomically instead of blindly overwriting.
json_archive_session() {
    if [ ! -f "$JSON_BACKUP_LOG_FILE" ]; then
        return 1
    fi

    local service_metrics_dir history_file
    service_metrics_dir=$(dirname "$JSON_BACKUP_METRICS_FILE")
    history_file="$service_metrics_dir/history.json"

    # Seed the history file safely (jq escapes the service name).
    if [ ! -f "$history_file" ]; then
        jq -n --arg service "$JSON_BACKUP_SERVICE" \
            '{service_name: $service, sessions: []}' > "$history_file" || return 1
    fi

    local session_data
    session_data=$(cat "$JSON_BACKUP_LOG_FILE")

    local tmp_file="${history_file}.tmp"
    if jq \
        --argjson session "$session_data" \
        '.sessions += [$session] | .sessions |= sort_by(.start_time.epoch) | .sessions |= reverse' \
        "$history_file" > "$tmp_file"; then
        mv "$tmp_file" "$history_file"
    else
        rm -f "$tmp_file"
        json_log_debug "Warning: jq failed, session not archived"
        return 1
    fi

    json_log_debug "Archived session to history"
}
|
||||
|
||||
# Cleanup session temporary files
|
||||
# Remove the per-session scratch directory, if one was created.
json_cleanup_session() {
    [ -d "$JSON_BACKUP_TEMP_DIR" ] || return 0
    rm -rf "$JSON_BACKUP_TEMP_DIR"
    json_log_debug "Cleaned up temporary session directory"
}
|
||||
|
||||
# Get current backup status (for external monitoring)
|
||||
# Print the latest metrics JSON for a service to stdout, or a small JSON
# error object when no metrics exist. Intended for external monitoring.
json_get_current_status() {
    local svc="$1"

    if [ -z "$svc" ]; then
        echo "Error: Service name required" >&2
        return 1
    fi

    local metrics_file="$JSON_METRICS_ROOT/$svc/metrics.json"
    if [ -f "$metrics_file" ]; then
        cat "$metrics_file"
        return
    fi
    echo "{\"error\": \"No metrics found for service: $svc\"}"
}
|
||||
|
||||
# Helper function to track phase timing
|
||||
# Record the elapsed time of a phase: computes now - $2 (epoch seconds)
# and delegates to json_backup_record_phase.
json_backup_time_phase() {
    local phase="$1"
    local began="$2"

    if [ -z "$began" ]; then
        echo "Error: Start time required for phase timing" >&2
        return 1
    fi

    json_backup_record_phase "$phase" "$(( $(date +%s) - began ))"
}
|
||||
|
||||
# Convenience function for error handling
|
||||
# Convenience error hook: with a file path, record a failed file entry;
# otherwise mark the whole session failed with the message.
json_backup_error() {
    local msg="$1"
    local target_file="$2"

    if [ -z "$target_file" ]; then
        json_backup_update_status "failed" "$msg"
    else
        json_backup_add_file "$target_file" "failed" "0" "" "$msg"
    fi
}
|
||||
|
||||
# Export all functions for use in other scripts
|
||||
export -f json_backup_init
|
||||
export -f json_backup_start
|
||||
export -f json_backup_add_file
|
||||
export -f json_backup_record_phase
|
||||
export -f json_backup_complete
|
||||
export -f json_backup_update_status
|
||||
export -f json_backup_error
|
||||
export -f json_backup_time_phase
|
||||
export -f json_get_current_status
|
||||
export -f json_log_debug
|
||||
|
||||
json_log_debug "Backup JSON Logger library loaded"
|
||||
0
lib/backup-metrics-lib.sh
Normal file
0
lib/backup-metrics-lib.sh
Normal file
246
lib/unified-backup-metrics-simple.sh
Normal file
246
lib/unified-backup-metrics-simple.sh
Normal file
@@ -0,0 +1,246 @@
|
||||
#!/bin/bash
|
||||
|
||||
################################################################################
|
||||
# Simplified Unified Backup Metrics Library
|
||||
################################################################################
|
||||
#
|
||||
# Author: Peter Wood <peter@peterwood.dev>
|
||||
# Description: Lightweight backup metrics tracking for personal backup systems.
|
||||
# Provides essential status tracking without enterprise complexity.
|
||||
#
|
||||
# Features:
|
||||
# - Simple JSON status files (one per service)
|
||||
# - Basic timing and file counting
|
||||
# - Minimal performance overhead
|
||||
# - Easy to debug and maintain
|
||||
# - Web interface ready
|
||||
#
|
||||
# Usage:
|
||||
# source /home/acedanger/shell/lib/unified-backup-metrics-simple.sh
|
||||
#
|
||||
# metrics_backup_start "service-name" "description" "/backup/path"
|
||||
# metrics_update_status "running" "Current operation"
|
||||
# metrics_file_backup_complete "/path/to/file" "1024" "success"
|
||||
# metrics_backup_complete "success" "Backup completed successfully"
|
||||
#
|
||||
################################################################################
|
||||
|
||||
# Configuration
|
||||
METRICS_ROOT="${BACKUP_ROOT:-/mnt/share/media/backups}/metrics"
|
||||
METRICS_DEBUG="${METRICS_DEBUG:-false}"
|
||||
|
||||
# Global state
|
||||
declare -g METRICS_SERVICE=""
|
||||
declare -g METRICS_START_TIME=""
|
||||
declare -g METRICS_STATUS_FILE=""
|
||||
declare -g METRICS_FILE_COUNT=0
|
||||
declare -g METRICS_TOTAL_SIZE=0
|
||||
|
||||
# Debug function
|
||||
# Emit a diagnostic line to stderr, but only when METRICS_DEBUG=true.
metrics_debug() {
    [ "$METRICS_DEBUG" = "true" ] && echo "[METRICS] $1" >&2
    return 0
}
|
||||
|
||||
# Initialize metrics for a backup service
|
||||
# Initialize metrics tracking for a backup service and write the initial
# status JSON.
#
# Arguments:
#   $1 - service name (required)
#   $2 - human-readable description
#   $3 - backup destination path
# Globals written: METRICS_SERVICE, METRICS_START_TIME,
#   METRICS_STATUS_FILE, METRICS_FILE_COUNT, METRICS_TOTAL_SIZE
# Returns 0 on success, 1 when the service name is missing.
#
# Fix: description/backup_path were interpolated raw into a heredoc, so a
# value containing a double quote or backslash produced invalid JSON.
# When jq is available we build the document with proper escaping; the
# heredoc remains as the no-jq fallback.
metrics_backup_start() {
    local service_name="$1"
    local description="$2"
    local backup_path="$3"

    if [ -z "$service_name" ]; then
        metrics_debug "Warning: No service name provided to metrics_backup_start"
        return 1
    fi

    # Set global state
    METRICS_SERVICE="$service_name"
    METRICS_START_TIME=$(date +%s)
    METRICS_FILE_COUNT=0
    METRICS_TOTAL_SIZE=0

    mkdir -p "$METRICS_ROOT"
    METRICS_STATUS_FILE="$METRICS_ROOT/${service_name}_status.json"

    local start_iso updated_iso host
    start_iso=$(date -d "@$METRICS_START_TIME" --iso-8601=seconds)
    updated_iso=$(date --iso-8601=seconds)
    host=$(hostname)

    if command -v jq >/dev/null 2>&1; then
        jq -n \
            --arg service "$service_name" \
            --arg description "$description" \
            --arg backup_path "$backup_path" \
            --arg start_time "$start_iso" \
            --argjson start_timestamp "$METRICS_START_TIME" \
            --arg last_updated "$updated_iso" \
            --arg hostname "$host" \
            '{
                service: $service,
                description: $description,
                backup_path: $backup_path,
                status: "running",
                start_time: $start_time,
                start_timestamp: $start_timestamp,
                current_operation: "Starting backup",
                files_processed: 0,
                total_size_bytes: 0,
                last_updated: $last_updated,
                hostname: $hostname
            }' > "$METRICS_STATUS_FILE"
    else
        # Fallback without jq; values containing quotes will still break
        # JSON here, but downstream parsing already requires jq anyway.
        cat > "$METRICS_STATUS_FILE" << EOF
{
  "service": "$service_name",
  "description": "$description",
  "backup_path": "$backup_path",
  "status": "running",
  "start_time": "$start_iso",
  "start_timestamp": $METRICS_START_TIME,
  "current_operation": "Starting backup",
  "files_processed": 0,
  "total_size_bytes": 0,
  "last_updated": "$updated_iso",
  "hostname": "$host"
}
EOF
    fi

    metrics_debug "Started metrics tracking for $service_name"
    return 0
}
|
||||
|
||||
# Update backup status
|
||||
# Update .status and .current_operation in the active status file.
# Without jq, appends a human-readable comment line instead.
metrics_update_status() {
    local status="$1"
    local operation="$2"

    if [ -z "$METRICS_STATUS_FILE" ] || [ ! -f "$METRICS_STATUS_FILE" ]; then
        metrics_debug "Warning: No active metrics session for status update"
        return 1
    fi

    if ! command -v jq >/dev/null 2>&1; then
        # Fallback without jq - just add a simple status line to end of file
        echo "# Status: $status - $operation ($(date --iso-8601=seconds))" >> "$METRICS_STATUS_FILE"
    else
        local scratch="${METRICS_STATUS_FILE}.tmp"
        jq --arg status "$status" \
            --arg operation "$operation" \
            --arg updated "$(date --iso-8601=seconds)" \
            '.status = $status | .current_operation = $operation | .last_updated = $updated' \
            "$METRICS_STATUS_FILE" > "$scratch" && mv "$scratch" "$METRICS_STATUS_FILE"
    fi

    metrics_debug "Updated status: $status - $operation"
    return 0
}
|
||||
|
||||
# Track individual file backup completion
|
||||
# Record one processed file. Only "success" files add to the running
# count/size totals; all outcomes are logged via metrics_debug.
#
# Arguments: $1 file path, $2 size in bytes, $3 status.
metrics_file_backup_complete() {
    local file_path="$1"
    local file_size="$2"
    local status="$3"

    if [ -z "$METRICS_STATUS_FILE" ] || [ ! -f "$METRICS_STATUS_FILE" ]; then
        metrics_debug "Warning: No active metrics session for file tracking"
        return 1
    fi

    # Only successful files contribute to the totals.
    if [ "$status" = "success" ]; then
        METRICS_FILE_COUNT=$((METRICS_FILE_COUNT + 1))
        METRICS_TOTAL_SIZE=$((METRICS_TOTAL_SIZE + ${file_size:-0}))
    fi

    if command -v jq >/dev/null 2>&1; then
        local scratch="${METRICS_STATUS_FILE}.tmp"
        jq --argjson files "$METRICS_FILE_COUNT" \
            --argjson size "$METRICS_TOTAL_SIZE" \
            --arg updated "$(date --iso-8601=seconds)" \
            '.files_processed = $files | .total_size_bytes = $size | .last_updated = $updated' \
            "$METRICS_STATUS_FILE" > "$scratch" && mv "$scratch" "$METRICS_STATUS_FILE"
    fi

    metrics_debug "File tracked: $(basename "$file_path") ($status, ${file_size:-0} bytes)"
    return 0
}
|
||||
|
||||
# Complete backup and finalize metrics
|
||||
# Finalize the active metrics session: record outcome, timing and totals,
# then reset all session globals so stale values cannot leak into the
# next run.
#
# Arguments:
#   $1 - final status ("success", "failed", "completed_with_errors")
#   $2 - human-readable completion message
metrics_backup_complete() {
    local final_status="$1"
    local message="$2"

    if [ -z "$METRICS_STATUS_FILE" ] || [ ! -f "$METRICS_STATUS_FILE" ]; then
        metrics_debug "Warning: No active metrics session to complete"
        return 1
    fi

    local end_time duration
    end_time=$(date +%s)
    duration=$((end_time - METRICS_START_TIME))

    if command -v jq >/dev/null 2>&1; then
        local scratch="${METRICS_STATUS_FILE}.tmp"
        jq --arg status "$final_status" \
            --arg message "$message" \
            --arg end_time "$(date -d "@$end_time" --iso-8601=seconds)" \
            --argjson end_timestamp "$end_time" \
            --argjson duration "$duration" \
            --argjson files "$METRICS_FILE_COUNT" \
            --argjson size "$METRICS_TOTAL_SIZE" \
            --arg updated "$(date --iso-8601=seconds)" \
            '.status = $status |
            .message = $message |
            .end_time = $end_time |
            .end_timestamp = $end_timestamp |
            .duration_seconds = $duration |
            .files_processed = $files |
            .total_size_bytes = $size |
            .current_operation = "Completed" |
            .last_updated = $updated' \
            "$METRICS_STATUS_FILE" > "$scratch" && mv "$scratch" "$METRICS_STATUS_FILE"
    else
        # Fallback - append completion info
        cat >> "$METRICS_STATUS_FILE" << EOF
# COMPLETION: $final_status
# MESSAGE: $message
# END_TIME: $(date -d "@$end_time" --iso-8601=seconds)
# DURATION: ${duration}s
# FILES: $METRICS_FILE_COUNT
# SIZE: $METRICS_TOTAL_SIZE bytes
EOF
    fi

    metrics_debug "Backup completed: $final_status ($duration seconds, $METRICS_FILE_COUNT files)"

    # Reset session state.
    METRICS_SERVICE=""
    METRICS_START_TIME=""
    METRICS_STATUS_FILE=""
    METRICS_FILE_COUNT=0
    METRICS_TOTAL_SIZE=0

    return 0
}
|
||||
|
||||
# Legacy compatibility functions (for existing integrations)
|
||||
# Legacy compatibility shims for older integrations.
metrics_init() {
    metrics_backup_start "$1" "${2:-Backup operation}" "${3:-/backup}"
}

metrics_start_backup() {
    metrics_update_status "running" "Backup in progress"
}

# NB: legacy argument order is (path, status, size); the modern function
# takes (path, size, status), hence the swapped $2/$3.
metrics_add_file() {
    metrics_file_backup_complete "$1" "$3" "$2"
}

metrics_complete_backup() {
    metrics_backup_complete "$1" "${2:-Backup operation completed}"
}
|
||||
|
||||
# Utility function to get current status
|
||||
# Print the recorded status for a service: the stored .status value,
# "available" when the file exists but jq does not, or "never_run".
metrics_get_status() {
    local service_name="$1"
    local status_file="$METRICS_ROOT/${service_name}_status.json"

    if [ ! -f "$status_file" ]; then
        echo "never_run"
        return
    fi

    if command -v jq >/dev/null 2>&1; then
        jq -r '.status' "$status_file" 2>/dev/null || echo "unknown"
    else
        echo "available"
    fi
}
|
||||
|
||||
# Utility function to list all services with metrics
|
||||
# List service names that have a status file, one per line, sorted.
metrics_list_services() {
    [ -d "$METRICS_ROOT" ] || return 0
    find "$METRICS_ROOT" -name "*_status.json" -exec basename {} \; | sed 's/_status\.json$//' | sort
}
|
||||
|
||||
metrics_debug "Simplified unified backup metrics library loaded"
|
||||
251
lib/unified-backup-metrics.sh
Normal file
251
lib/unified-backup-metrics.sh
Normal file
@@ -0,0 +1,251 @@
|
||||
#!/bin/bash
|
||||
|
||||
################################################################################
|
||||
# Unified Backup Metrics Library
|
||||
################################################################################
|
||||
#
|
||||
# Author: Peter Wood <peter@peterwood.dev>
|
||||
# Description: Lightweight backup metrics tracking for personal backup systems.
|
||||
# Provides essential status tracking without enterprise complexity.
|
||||
#
|
||||
# Features:
|
||||
# - Simple JSON status files (one per service)
|
||||
# - Basic timing and file counting
|
||||
# - Minimal performance overhead
|
||||
# - Easy to debug and maintain
|
||||
# - Web interface ready
|
||||
#
|
||||
# Usage:
|
||||
#   source /home/acedanger/shell/lib/unified-backup-metrics.sh
|
||||
#
|
||||
# metrics_backup_start "service-name" "description" "/backup/path"
|
||||
# metrics_update_status "running" "Current operation"
|
||||
# metrics_file_backup_complete "/path/to/file" "1024" "success"
|
||||
# metrics_backup_complete "success" "Backup completed successfully"
|
||||
#
|
||||
################################################################################
|
||||
|
||||
# Configuration
|
||||
METRICS_ROOT="${BACKUP_ROOT:-/mnt/share/media/backups}/metrics"
|
||||
METRICS_DEBUG="${METRICS_DEBUG:-false}"
|
||||
|
||||
# Global state
|
||||
declare -g METRICS_SERVICE=""
|
||||
declare -g METRICS_START_TIME=""
|
||||
declare -g METRICS_STATUS_FILE=""
|
||||
declare -g METRICS_FILE_COUNT=0
|
||||
declare -g METRICS_TOTAL_SIZE=0
|
||||
|
||||
# Debug function
|
||||
# Emit a diagnostic line to stderr, but only when METRICS_DEBUG=true.
metrics_debug() {
    [ "$METRICS_DEBUG" = "true" ] && echo "[METRICS] $1" >&2
    return 0
}
|
||||
|
||||
# Initialize metrics for a backup service
|
||||
# Initialize metrics tracking for a backup service and write the initial
# status JSON.
#
# Arguments:
#   $1 - service name (required)
#   $2 - human-readable description
#   $3 - backup destination path
# Globals written: METRICS_SERVICE, METRICS_START_TIME,
#   METRICS_STATUS_FILE, METRICS_FILE_COUNT, METRICS_TOTAL_SIZE
# Returns 0 on success, 1 when the service name is missing.
#
# Fix: description/backup_path were interpolated raw into a heredoc, so a
# value containing a double quote or backslash produced invalid JSON.
# When jq is available we build the document with proper escaping; the
# heredoc remains as the no-jq fallback.
metrics_backup_start() {
    local service_name="$1"
    local description="$2"
    local backup_path="$3"

    if [ -z "$service_name" ]; then
        metrics_debug "Warning: No service name provided to metrics_backup_start"
        return 1
    fi

    # Set global state
    METRICS_SERVICE="$service_name"
    METRICS_START_TIME=$(date +%s)
    METRICS_FILE_COUNT=0
    METRICS_TOTAL_SIZE=0

    mkdir -p "$METRICS_ROOT"
    METRICS_STATUS_FILE="$METRICS_ROOT/${service_name}_status.json"

    local start_iso updated_iso host
    start_iso=$(date -d "@$METRICS_START_TIME" --iso-8601=seconds)
    updated_iso=$(date --iso-8601=seconds)
    host=$(hostname)

    if command -v jq >/dev/null 2>&1; then
        jq -n \
            --arg service "$service_name" \
            --arg description "$description" \
            --arg backup_path "$backup_path" \
            --arg start_time "$start_iso" \
            --argjson start_timestamp "$METRICS_START_TIME" \
            --arg last_updated "$updated_iso" \
            --arg hostname "$host" \
            '{
                service: $service,
                description: $description,
                backup_path: $backup_path,
                status: "running",
                start_time: $start_time,
                start_timestamp: $start_timestamp,
                current_operation: "Starting backup",
                files_processed: 0,
                total_size_bytes: 0,
                last_updated: $last_updated,
                hostname: $hostname
            }' > "$METRICS_STATUS_FILE"
    else
        # Fallback without jq; values containing quotes will still break
        # JSON here, but downstream parsing already requires jq anyway.
        cat > "$METRICS_STATUS_FILE" << EOF
{
  "service": "$service_name",
  "description": "$description",
  "backup_path": "$backup_path",
  "status": "running",
  "start_time": "$start_iso",
  "start_timestamp": $METRICS_START_TIME,
  "current_operation": "Starting backup",
  "files_processed": 0,
  "total_size_bytes": 0,
  "last_updated": "$updated_iso",
  "hostname": "$host"
}
EOF
    fi

    metrics_debug "Started metrics tracking for $service_name"
    return 0
}
|
||||
|
||||
# Update backup status
|
||||
# Update .status and .current_operation in the active status file.
# Without jq, appends a human-readable comment line instead.
metrics_update_status() {
    local new_status="$1"
    local operation="$2"

    if [ -z "$METRICS_STATUS_FILE" ] || [ ! -f "$METRICS_STATUS_FILE" ]; then
        metrics_debug "Warning: No active metrics session for status update"
        return 1
    fi

    if ! command -v jq >/dev/null 2>&1; then
        # Fallback without jq - just add a simple status line to end of file
        echo "# Status: $new_status - $operation ($(date --iso-8601=seconds))" >> "$METRICS_STATUS_FILE"
    else
        local scratch="${METRICS_STATUS_FILE}.tmp"
        jq --arg status "$new_status" \
            --arg operation "$operation" \
            --arg updated "$(date --iso-8601=seconds)" \
            '.status = $status | .current_operation = $operation | .last_updated = $updated' \
            "$METRICS_STATUS_FILE" > "$scratch" && mv "$scratch" "$METRICS_STATUS_FILE"
    fi

    metrics_debug "Updated status: $new_status - $operation"
    return 0
}
|
||||
|
||||
# Track individual file backup completion
|
||||
# Record one processed file. Only "success" files add to the running
# count/size totals; all outcomes are logged via metrics_debug.
#
# Arguments: $1 file path, $2 size in bytes, $3 status.
metrics_file_backup_complete() {
    local file_path="$1"
    local file_size="$2"
    local file_status="$3"

    if [ -z "$METRICS_STATUS_FILE" ] || [ ! -f "$METRICS_STATUS_FILE" ]; then
        metrics_debug "Warning: No active metrics session for file tracking"
        return 1
    fi

    # Only successful files contribute to the totals.
    if [ "$file_status" = "success" ]; then
        METRICS_FILE_COUNT=$((METRICS_FILE_COUNT + 1))
        METRICS_TOTAL_SIZE=$((METRICS_TOTAL_SIZE + ${file_size:-0}))
    fi

    if command -v jq >/dev/null 2>&1; then
        local scratch="${METRICS_STATUS_FILE}.tmp"
        jq --argjson files "$METRICS_FILE_COUNT" \
            --argjson size "$METRICS_TOTAL_SIZE" \
            --arg updated "$(date --iso-8601=seconds)" \
            '.files_processed = $files | .total_size_bytes = $size | .last_updated = $updated' \
            "$METRICS_STATUS_FILE" > "$scratch" && mv "$scratch" "$METRICS_STATUS_FILE"
    fi

    metrics_debug "File tracked: $(basename "$file_path") ($file_status, ${file_size:-0} bytes)"
    return 0
}
|
||||
|
||||
# Complete backup and finalize metrics
|
||||
# Finalize the active metrics session: record outcome, timing and totals,
# then reset all session globals so stale values cannot leak into the
# next run.
#
# Arguments:
#   $1 - final status ("success", "failed", "completed_with_errors")
#   $2 - human-readable completion message
metrics_backup_complete() {
    local final_status="$1"
    local message="$2"

    if [ -z "$METRICS_STATUS_FILE" ] || [ ! -f "$METRICS_STATUS_FILE" ]; then
        metrics_debug "Warning: No active metrics session to complete"
        return 1
    fi

    local end_time duration
    end_time=$(date +%s)
    duration=$((end_time - METRICS_START_TIME))

    if command -v jq >/dev/null 2>&1; then
        local scratch="${METRICS_STATUS_FILE}.tmp"
        jq --arg status "$final_status" \
            --arg message "$message" \
            --arg end_time "$(date -d "@$end_time" --iso-8601=seconds)" \
            --argjson end_timestamp "$end_time" \
            --argjson duration "$duration" \
            --argjson files "$METRICS_FILE_COUNT" \
            --argjson size "$METRICS_TOTAL_SIZE" \
            --arg updated "$(date --iso-8601=seconds)" \
            '.status = $status |
            .message = $message |
            .end_time = $end_time |
            .end_timestamp = $end_timestamp |
            .duration_seconds = $duration |
            .files_processed = $files |
            .total_size_bytes = $size |
            .current_operation = "Completed" |
            .last_updated = $updated' \
            "$METRICS_STATUS_FILE" > "$scratch" && mv "$scratch" "$METRICS_STATUS_FILE"
    else
        # Fallback - append completion info
        cat >> "$METRICS_STATUS_FILE" << EOF
# COMPLETION: $final_status
# MESSAGE: $message
# END_TIME: $(date -d "@$end_time" --iso-8601=seconds)
# DURATION: ${duration}s
# FILES: $METRICS_FILE_COUNT
# SIZE: $METRICS_TOTAL_SIZE bytes
EOF
    fi

    metrics_debug "Backup completed: $final_status ($duration seconds, $METRICS_FILE_COUNT files)"

    # Reset session state.
    METRICS_SERVICE=""
    METRICS_START_TIME=""
    METRICS_STATUS_FILE=""
    METRICS_FILE_COUNT=0
    METRICS_TOTAL_SIZE=0

    return 0
}
|
||||
|
||||
# Legacy compatibility functions (for existing integrations)
|
||||
# Legacy compatibility shims for older integrations.
metrics_init() {
    metrics_backup_start "$1" "${2:-Backup operation}" "${3:-/backup}"
}

metrics_start_backup() {
    metrics_update_status "running" "Backup in progress"
}

# NB: legacy argument order is (path, status, size); the modern function
# takes (path, size, status), hence the swapped $2/$3.
metrics_add_file() {
    metrics_file_backup_complete "$1" "$3" "$2"
}

metrics_complete_backup() {
    metrics_backup_complete "$1" "${2:-Backup operation completed}"
}

# Alias used by backup-media.sh.
metrics_status_update() {
    metrics_update_status "$1" "$2"
}
|
||||
|
||||
# Utility function to get current status
|
||||
# Print the recorded status for a service: the stored .status value,
# "available" when the file exists but jq does not, or "never_run".
metrics_get_status() {
    local service_name="$1"
    local status_file="$METRICS_ROOT/${service_name}_status.json"

    if [ ! -f "$status_file" ]; then
        echo "never_run"
        return
    fi

    if command -v jq >/dev/null 2>&1; then
        jq -r '.status' "$status_file" 2>/dev/null || echo "unknown"
    else
        echo "available"
    fi
}
|
||||
|
||||
# Utility function to list all services with metrics
|
||||
# List service names that have a status file, one per line, sorted.
metrics_list_services() {
    [ -d "$METRICS_ROOT" ] || return 0
    find "$METRICS_ROOT" -name "*_status.json" -exec basename {} \; | sed 's/_status\.json$//' | sort
}
|
||||
|
||||
metrics_debug "Simplified unified backup metrics library loaded"
|
||||
Reference in New Issue
Block a user