System Backup Strategies

A system backup is the essential process for restoring a system after data loss, system corruption, or a security incident. A sound backup strategy is a prerequisite for business continuity and disaster recovery.

Backup Fundamentals

Backup Types

# Full Backup - A complete copy of all data
# Advantages: Fast, simple restore
# Disadvantages: Requires the most time and storage space

# Incremental Backup - Files changed since the last backup of any type
# Advantages: Fast, uses little space
# Disadvantages: Restore is more complex (every increment in the chain is needed)

# Differential Backup - Files changed since the last full backup
# Advantages: Simple restore (only the full backup + the latest differential)
# Disadvantages: Grows larger each day until the next full backup

# Snapshot - A point-in-time view of a file system
# Advantages: Fast, space-efficient
# Disadvantages: Depends on the underlying storage (not an independent copy)

Backup Strategy Planning

# RTO (Recovery Time Objective) - How quickly the system must be restored
# RPO (Recovery Point Objective) - How much data loss is acceptable (see the check sketched below)
# Retention Period - How long backups are kept
# Storage Location - Local, offsite, cloud
# Testing - Backup integrity and restore testing
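
As a quick operational check, the age of the newest backup can be compared against the RPO. The snippet below is a minimal sketch; the backup path, file-naming pattern, and threshold are assumptions to adapt.

#!/bin/bash
# rpo-check.sh - warn if the newest backup is older than the RPO (sketch; paths are assumptions)
RPO_HOURS=24
BACKUP_DIR="/backup"

latest=$(find "$BACKUP_DIR" -name "backup-*.tar.gz" -printf '%T@\n' | sort -n | tail -1)
if [ -z "$latest" ]; then
    echo "No backups found in $BACKUP_DIR"
    exit 1
fi

age_hours=$(( ( $(date +%s) - ${latest%.*} ) / 3600 ))
if [ "$age_hours" -gt "$RPO_HOURS" ]; then
    echo "WARNING: newest backup is ${age_hours}h old (RPO is ${RPO_HOURS}h)"
    exit 1
fi
echo "OK: newest backup is ${age_hours}h old"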

3-2-1 Backup Rule

# 3 copies - Keep at least 3 copies of the data
# 2 different media - Store them on at least 2 different types of storage media
# 1 offsite - Keep at least 1 copy at a different physical location

# Example:
# - Original data (production server)
# - Local backup (NAS/external drive)
# - Cloud backup (AWS S3, Google Cloud, etc.)
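
A minimal sketch of the rule using the tools covered below; the NAS mount point, S3 bucket, and paths are assumptions.

# 3-2-1 in practice (sketch): copy 1 is production, copy 2 goes to a NAS, copy 3 goes offsite to S3
rsync -av --delete /home/user/ /mnt/nas/backup/user/
aws s3 sync /mnt/nas/backup/user/ s3://example-backups/user/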

File-level Backup Tools

rsync - Powerful Synchronization Tool

# Basic rsync syntax
rsync [options] source destination

# Local backup
rsync -av /home/user/ /backup/user/
rsync -av --delete /etc/ /backup/etc/ # Remove files from the destination that no longer exist in the source

# Remote backup
rsync -av /home/user/ user@backup-server:/backup/user/
rsync -av -e ssh /data/ backup-server:/remote/backup/

# Common options
-a, --archive # Archive mode (recursive, preserve permissions, times, etc.)
-v, --verbose # Verbose output
-z, --compress # Compress data during transfer
-h, --human-readable # Human readable output
--delete # Delete files from destination not in source
--exclude # Exclude files/directories
--dry-run # Show what would be done without doing it
--progress # Show progress during transfer
-e # Specify remote shell (ssh)

# Advanced rsync examples
rsync -avz --exclude='*.tmp' --exclude='cache/' /var/www/ /backup/www/
rsync -avz --delete --backup --backup-dir=/backup/deleted-$(date +%Y%m%d) /home/ /backup/home/
rsync -avz --bwlimit=1000 /large-data/ remote:/backup/ # Bandwidth limit
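
rsync can also maintain rotating, snapshot-style backups with hard links, so unchanged files consume disk space only once. A minimal sketch, assuming backups live under /backup/home and a "latest" symlink is kept up to date:

# Hard-link based daily snapshots with --link-dest (sketch; paths are assumptions)
TODAY=$(date +%Y-%m-%d)
rsync -av --delete --link-dest=/backup/home/latest /home/ "/backup/home/$TODAY/"
ln -sfn "/backup/home/$TODAY" /backup/home/latest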

tar - Archive and Compression

# Create archives
tar -cvf backup.tar /home/user/ # Create tar archive
tar -czvf backup.tar.gz /home/user/ # Create compressed archive
tar -cjvf backup.tar.bz2 /home/user/ # Create bzip2 compressed archive
tar -cJvf backup.tar.xz /home/user/ # Create xz compressed archive

# Extract archives
tar -xvf backup.tar # Extract tar archive
tar -xzvf backup.tar.gz # Extract gzip archive
tar -xjvf backup.tar.bz2 # Extract bzip2 archive

# List archive contents
tar -tvf backup.tar # List files in archive
tar -tzvf backup.tar.gz # List files in compressed archive

# Incremental backups with tar
tar -cvf backup-full.tar -g snapshot.file /home/user/          # Level 0 (full) backup; creates snapshot.file
tar -cvf backup-incremental.tar -g snapshot.file /home/user/   # Next run stores only files changed since the last run

# Exclude files
tar --exclude='*.log' --exclude='tmp/*' -czvf backup.tar.gz /var/www/
tar --exclude-from=exclude-list.txt -czvf backup.tar.gz /data/

# Split large archives
tar -czvf - /large-directory/ | split -b 1GB - backup.tar.gz.
# Restore split archives
cat backup.tar.gz.* | tar -xzvf -
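
When archives are split or moved between machines, a checksum manifest makes transfer corruption easy to detect:

# Record checksums, then verify them again after the transfer
sha256sum backup.tar.gz.* > backup.sha256
sha256sum -c backup.sha256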

Advanced Backup Scripts

#!/bin/bash
# comprehensive-backup.sh - Advanced backup script

# Configuration
BACKUP_SOURCE="/home /etc /var/www /opt"
BACKUP_DEST="/backup"
REMOTE_BACKUP="backup-server:/remote/backup"
RETENTION_DAYS=30
LOG_FILE="/var/log/backup.log"
EMAIL="admin@company.com"

# Date format for backup naming
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR="$BACKUP_DEST/backup-$DATE"

# Logging function
log_message() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

# Pre-backup checks
pre_backup_checks() {
log_message "Starting pre-backup checks..."

# Check backup destination space
available_space=$(df "$BACKUP_DEST" | awk 'NR==2 {print $4}')
required_space=10000000 # 10GB in KB

if [ "$available_space" -lt "$required_space" ]; then
log_message "ERROR: Insufficient space in backup destination"
return 1
fi

# Check if backup destination is writable
if [ ! -w "$BACKUP_DEST" ]; then
log_message "ERROR: Backup destination is not writable"
return 1
fi

# Check if source directories exist
for source in $BACKUP_SOURCE; do
if [ ! -d "$source" ]; then
log_message "WARNING: Source directory $source does not exist"
fi
done

log_message "Pre-backup checks completed successfully"
return 0
}

# Create local backup
create_local_backup() {
log_message "Creating local backup in $BACKUP_DIR..."

mkdir -p "$BACKUP_DIR"

for source in $BACKUP_SOURCE; do
if [ -d "$source" ]; then
log_message "Backing up $source..."

# Create directory structure
backup_path="$BACKUP_DIR$(dirname "$source")"
mkdir -p "$backup_path"

# Use rsync for efficient copying
if rsync -av --delete "$source/" "$BACKUP_DIR$source/" >> "$LOG_FILE" 2>&1; then
log_message "Successfully backed up $source"
else
log_message "ERROR: Failed to backup $source"
return 1
fi
fi
done

# Create backup metadata
cat > "$BACKUP_DIR/backup-info.txt" << EOF
Backup created: $(date)
Hostname: $(hostname)
Backup type: Full
Sources: $BACKUP_SOURCE
Backup size: $(du -sh "$BACKUP_DIR" | cut -f1)
EOF

log_message "Local backup completed successfully"
return 0
}

# Compress backup
compress_backup() {
log_message "Compressing backup..."

cd "$(dirname "$BACKUP_DIR")"
backup_name=$(basename "$BACKUP_DIR")

if tar -czf "$backup_name.tar.gz" "$backup_name" >> "$LOG_FILE" 2>&1; then
rm -rf "$backup_name"
log_message "Backup compressed successfully: $backup_name.tar.gz"
return 0
else
log_message "ERROR: Failed to compress backup"
return 1
fi
}

# Sync to remote location
sync_to_remote() {
log_message "Syncing to remote backup location..."

if rsync -avz "$BACKUP_DEST/" "$REMOTE_BACKUP/" >> "$LOG_FILE" 2>&1; then
log_message "Remote sync completed successfully"
return 0
else
log_message "ERROR: Remote sync failed"
return 1
fi
}

# Cleanup old backups
cleanup_old_backups() {
log_message "Cleaning up old backups (retention: $RETENTION_DAYS days)..."

# Local cleanup (count before deleting so the log reports the correct number)
deleted_count=$(find "$BACKUP_DEST" -name "backup-*.tar.gz" -mtime +$RETENTION_DAYS | wc -l)
find "$BACKUP_DEST" -name "backup-*.tar.gz" -mtime +$RETENTION_DAYS -delete
log_message "Deleted $deleted_count old local backups"

# Remote cleanup (if accessible)
if ssh backup-server "find /remote/backup -name 'backup-*.tar.gz' -mtime +$RETENTION_DAYS -delete" >> "$LOG_FILE" 2>&1; then
log_message "Remote cleanup completed"
else
log_message "WARNING: Remote cleanup failed or not accessible"
fi
}

# Verify backup integrity
verify_backup() {
log_message "Verifying backup integrity..."

backup_file="$BACKUP_DEST/backup-$DATE.tar.gz"

if [ -f "$backup_file" ]; then
# Test archive integrity
if tar -tzf "$backup_file" > /dev/null 2>&1; then
log_message "Backup integrity verification passed"
return 0
else
log_message "ERROR: Backup integrity verification failed"
return 1
fi
else
log_message "ERROR: Backup file not found for verification"
return 1
fi
}

# Send notification
send_notification() {
local status=$1
local subject="Backup $status - $(hostname)"

if command -v mail >/dev/null 2>&1; then
{
echo "Backup Status: $status"
echo "Date: $(date)"
echo "Hostname: $(hostname)"
echo ""
echo "Log summary:"
tail -20 "$LOG_FILE"
} | mail -s "$subject" "$EMAIL"
fi

# Log to syslog
logger -t backup "Backup $status on $(hostname)"
}

# Main backup execution
main() {
log_message "=== Starting backup process ==="

if ! pre_backup_checks; then
send_notification "FAILED"
exit 1
fi

if ! create_local_backup; then
send_notification "FAILED"
exit 1
fi

if ! compress_backup; then
send_notification "FAILED"
exit 1
fi

if ! verify_backup; then
send_notification "FAILED"
exit 1
fi

# Remote sync (optional - don't fail if it doesn't work)
sync_to_remote || log_message "WARNING: Remote sync failed but continuing"

cleanup_old_backups

log_message "=== Backup process completed successfully ==="
send_notification "SUCCESS"
}

# Execute main function
main

# Crontab entry for daily backups:
# 0 2 * * * /usr/local/bin/comprehensive-backup.sh
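
On systemd-based systems, a timer unit is an alternative to the crontab entry above; Persistent=true also catches up runs missed while the machine was off. A minimal sketch, assuming the script is installed at /usr/local/bin/comprehensive-backup.sh:

# /etc/systemd/system/comprehensive-backup.service (sketch)
[Unit]
Description=Comprehensive system backup

[Service]
Type=oneshot
ExecStart=/usr/local/bin/comprehensive-backup.sh

# /etc/systemd/system/comprehensive-backup.timer (sketch)
[Unit]
Description=Run the comprehensive backup daily at 02:00

[Timer]
OnCalendar=*-*-* 02:00:00
Persistent=true

[Install]
WantedBy=timers.target

# Enable with: systemctl daemon-reload && systemctl enable --now comprehensive-backup.timer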

Database Backup Strategies

MySQL/MariaDB Backup

#!/bin/bash
# mysql-backup.sh - MySQL database backup script

# Configuration
DB_HOST="localhost"
DB_USER="backup_user"
DB_PASS_FILE="/etc/mysql/backup.password"
BACKUP_DIR="/backup/mysql"
RETENTION_DAYS=30
LOG_FILE="/var/log/mysql-backup.log"

# Read password from secure file
if [ -f "$DB_PASS_FILE" ]; then
DB_PASS=$(cat "$DB_PASS_FILE")
chmod 600 "$DB_PASS_FILE" # Ensure secure permissions
else
echo "Password file not found: $DB_PASS_FILE"
exit 1
fi

log_message() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

# Full backup of all databases
backup_all_databases() {
local backup_file="$BACKUP_DIR/mysql-all-$(date +%Y%m%d_%H%M%S).sql.gz"

log_message "Starting full backup of all databases..."

if mysqldump -h "$DB_HOST" -u "$DB_USER" -p"$DB_PASS" \
--all-databases \
--single-transaction \
--routines \
--triggers \
--events \
--flush-logs \
--master-data=2 | gzip > "$backup_file"; then

log_message "Full backup completed: $backup_file"
echo "$backup_file"
return 0
else
log_message "ERROR: Full backup failed"
return 1
fi
}

# Individual database backup
backup_single_database() {
local db_name=$1
local backup_file="$BACKUP_DIR/${db_name}-$(date +%Y%m%d_%H%M%S).sql.gz"

log_message "Backing up database: $db_name"

if mysqldump -h "$DB_HOST" -u "$DB_USER" -p"$DB_PASS" \
--single-transaction \
--routines \
--triggers \
--events \
"$db_name" | gzip > "$backup_file"; then

log_message "Database $db_name backed up: $backup_file"
return 0
else
log_message "ERROR: Failed to backup database $db_name"
return 1
fi
}

# Binary log backup for point-in-time recovery
backup_binary_logs() {
local binlog_dir="$BACKUP_DIR/binlogs/$(date +%Y%m%d)"

mkdir -p "$binlog_dir"

log_message "Backing up binary logs..."

# Flush logs to create new binary log
mysql -h "$DB_HOST" -u "$DB_USER" -p"$DB_PASS" -e "FLUSH LOGS;"

# Copy binary logs
mysql -h "$DB_HOST" -u "$DB_USER" -p"$DB_PASS" -e "SHOW BINARY LOGS;" | \
tail -n +2 | while read log_name file_size; do
if [ -f "/var/lib/mysql/$log_name" ]; then
cp "/var/lib/mysql/$log_name" "$binlog_dir/"
log_message "Copied binary log: $log_name"
fi
done
}

# Restore procedures
restore_database() {
local backup_file=$1
local target_db=$2

log_message "Restoring database from: $backup_file"

if [[ "$backup_file" == *.gz ]]; then
if zcat "$backup_file" | mysql -h "$DB_HOST" -u "$DB_USER" -p"$DB_PASS" "$target_db"; then
log_message "Database restored successfully"
return 0
else
log_message "ERROR: Database restore failed"
return 1
fi
else
if mysql -h "$DB_HOST" -u "$DB_USER" -p"$DB_PASS" "$target_db" < "$backup_file"; then
log_message "Database restored successfully"
return 0
else
log_message "ERROR: Database restore failed"
return 1
fi
fi
}

# Main execution
mkdir -p "$BACKUP_DIR"

case "${1:-all}" in
"all")
backup_all_databases
backup_binary_logs
;;
"single")
if [ -z "$2" ]; then
echo "Usage: $0 single <database_name>"
exit 1
fi
backup_single_database "$2"
;;
"restore")
if [ -z "$2" ] || [ -z "$3" ]; then
echo "Usage: $0 restore <backup_file> <target_database>"
exit 1
fi
restore_database "$2" "$3"
;;
*)
echo "Usage: $0 {all|single <db>|restore <file> <db>}"
exit 1
;;
esac

# Cleanup old backups
find "$BACKUP_DIR" -name "*.sql.gz" -mtime +$RETENTION_DAYS -delete
find "$BACKUP_DIR/binlogs" -type d -mtime +7 -exec rm -rf {} \; 2>/dev/null

PostgreSQL Backup

#!/bin/bash
# postgresql-backup.sh - PostgreSQL backup script

# Configuration
PG_HOST="localhost"
PG_PORT="5432"
PG_USER="backup_user"
PGPASSFILE="/etc/postgresql/.pgpass"
BACKUP_DIR="/backup/postgresql"
RETENTION_DAYS=30
LOG_FILE="/var/log/postgresql-backup.log"

export PGPASSFILE

log_message() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

# Full cluster backup
backup_cluster() {
local backup_file="$BACKUP_DIR/pg-cluster-$(date +%Y%m%d_%H%M%S).sql.gz"

log_message "Starting cluster backup..."

if pg_dumpall -h "$PG_HOST" -p "$PG_PORT" -U "$PG_USER" | gzip > "$backup_file"; then
log_message "Cluster backup completed: $backup_file"
return 0
else
log_message "ERROR: Cluster backup failed"
return 1
fi
}

# Single database backup
backup_database() {
local db_name=$1
local backup_file="$BACKUP_DIR/${db_name}-$(date +%Y%m%d_%H%M%S).sql.gz"

log_message "Backing up database: $db_name"

# Plain-format dump piped through gzip; use -Fc with a .dump file instead if you plan to restore with pg_restore
if pg_dump -h "$PG_HOST" -p "$PG_PORT" -U "$PG_USER" \
"$db_name" | gzip > "$backup_file"; then

log_message "Database $db_name backed up: $backup_file"
return 0
else
log_message "ERROR: Failed to backup database $db_name"
return 1
fi
}

# WAL backup for point-in-time recovery
setup_wal_backup() {
local wal_backup_dir="$BACKUP_DIR/wal"

mkdir -p "$wal_backup_dir"

log_message "Setting up WAL backup..."

# This requires proper PostgreSQL configuration
# in postgresql.conf:
# wal_level = replica
# archive_mode = on
# archive_command = 'cp %p /backup/postgresql/wal/%f'

log_message "WAL backup directory created: $wal_backup_dir"
}

mkdir -p "$BACKUP_DIR"

case "${1:-cluster}" in
"cluster")
backup_cluster
;;
"database")
if [ -z "$2" ]; then
echo "Usage: $0 database <database_name>"
exit 1
fi
backup_database "$2"
;;
"wal-setup")
setup_wal_backup
;;
*)
echo "Usage: $0 {cluster|database <db>|wal-setup}"
exit 1
;;
esac

# Cleanup old backups
find "$BACKUP_DIR" -name "*.sql.gz" -mtime +$RETENTION_DAYS -delete

System-level Backup Solutions

LVM Snapshots

#!/bin/bash
# lvm-snapshot-backup.sh - LVM snapshot-based backup

# Configuration
VG_NAME="vg_data"
LV_NAME="lv_data"
SNAPSHOT_NAME="snap_backup"
SNAPSHOT_SIZE="5G"
MOUNT_POINT="/mnt/snapshot"
BACKUP_DEST="/backup/lvm"
LOG_FILE="/var/log/lvm-backup.log"

log_message() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

create_snapshot() {
log_message "Creating LVM snapshot..."

if lvcreate -L "$SNAPSHOT_SIZE" -s -n "$SNAPSHOT_NAME" "/dev/$VG_NAME/$LV_NAME"; then
log_message "Snapshot created successfully: $SNAPSHOT_NAME"
return 0
else
log_message "ERROR: Failed to create snapshot"
return 1
fi
}

mount_snapshot() {
log_message "Mounting snapshot..."

mkdir -p "$MOUNT_POINT"

if mount "/dev/$VG_NAME/$SNAPSHOT_NAME" "$MOUNT_POINT"; then
log_message "Snapshot mounted at: $MOUNT_POINT"
return 0
else
log_message "ERROR: Failed to mount snapshot"
return 1
fi
}

backup_snapshot() {
local backup_file="$BACKUP_DEST/lvm-backup-$(date +%Y%m%d_%H%M%S).tar.gz"

log_message "Creating backup from snapshot..."

if tar -czf "$backup_file" -C "$MOUNT_POINT" .; then
log_message "Backup created: $backup_file"
return 0
else
log_message "ERROR: Backup creation failed"
return 1
fi
}

cleanup_snapshot() {
log_message "Cleaning up snapshot..."

# Unmount snapshot
if mountpoint -q "$MOUNT_POINT"; then
umount "$MOUNT_POINT"
log_message "Snapshot unmounted"
fi

# Remove snapshot
if lvremove -f "/dev/$VG_NAME/$SNAPSHOT_NAME"; then
log_message "Snapshot removed"
else
log_message "WARNING: Failed to remove snapshot"
fi

rmdir "$MOUNT_POINT" 2>/dev/null
}

# Main execution
mkdir -p "$BACKUP_DEST"

log_message "=== Starting LVM snapshot backup ==="

if create_snapshot && mount_snapshot && backup_snapshot; then
log_message "LVM snapshot backup completed successfully"
cleanup_snapshot
exit 0
else
log_message "LVM snapshot backup failed"
cleanup_snapshot
exit 1
fi
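
One caveat with LVM snapshots: if the origin volume changes by more than SNAPSHOT_SIZE while the snapshot exists, the snapshot becomes invalid and the backup fails. Usage can be monitored while the backup runs; a minimal sketch:

# Watch how full the snapshot's copy-on-write space is
lvs -o lv_name,origin,data_percent "/dev/$VG_NAME/$SNAPSHOT_NAME"

# Extend the snapshot if it is filling up (the size increment is an example)
lvextend -L +2G "/dev/$VG_NAME/$SNAPSHOT_NAME"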

dd-based Disk Image Backup

#!/bin/bash
# disk-image-backup.sh - Create disk image backups

# Configuration
SOURCE_DISK="/dev/sdb"
BACKUP_DIR="/backup/images"
COMPRESSION="gzip" # gzip, bzip2, xz, or none
LOG_FILE="/var/log/disk-backup.log"

log_message() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

create_disk_image() {
local disk=$1
local image_file="$BACKUP_DIR/$(basename "$disk")-$(date +%Y%m%d_%H%M%S).img"

log_message "Creating disk image of $disk..."

case "$COMPRESSION" in
"gzip")
if dd if="$disk" bs=1M status=progress | gzip > "$image_file.gz"; then
log_message "Compressed disk image created: $image_file.gz"
return 0
fi
;;
"bzip2")
if dd if="$disk" bs=1M status=progress | bzip2 > "$image_file.bz2"; then
log_message "Compressed disk image created: $image_file.bz2"
return 0
fi
;;
"xz")
if dd if="$disk" bs=1M status=progress | xz > "$image_file.xz"; then
log_message "Compressed disk image created: $image_file.xz"
return 0
fi
;;
"none")
if dd if="$disk" of="$image_file" bs=1M status=progress; then
log_message "Disk image created: $image_file"
return 0
fi
;;
esac

log_message "ERROR: Failed to create disk image"
return 1
}

verify_image() {
local image_file=$1
local disk=$2

log_message "Verifying disk image..."

# Compare checksums
disk_hash=$(dd if="$disk" bs=1M | md5sum | cut -d' ' -f1)

case "$COMPRESSION" in
"gzip")
image_hash=$(zcat "$image_file" | md5sum | cut -d' ' -f1)
;;
"bzip2")
image_hash=$(bzcat "$image_file" | md5sum | cut -d' ' -f1)
;;
"xz")
image_hash=$(xzcat "$image_file" | md5sum | cut -d' ' -f1)
;;
"none")
image_hash=$(md5sum "$image_file" | cut -d' ' -f1)
;;
esac

if [ "$disk_hash" = "$image_hash" ]; then
log_message "Image verification successful"
return 0
else
log_message "ERROR: Image verification failed"
return 1
fi
}

restore_image() {
local image_file=$1
local target_disk=$2

log_message "Restoring image to $target_disk..."

read -p "WARNING: This will overwrite $target_disk. Continue? (yes/no): " confirm
if [ "$confirm" != "yes" ]; then
log_message "Restore cancelled by user"
return 1
fi

case "${image_file##*.}" in
"gz")
if zcat "$image_file" | dd of="$target_disk" bs=1M status=progress; then
log_message "Image restored successfully"
return 0
fi
;;
"bz2")
if bzcat "$image_file" | dd of="$target_disk" bs=1M status=progress; then
log_message "Image restored successfully"
return 0
fi
;;
"xz")
if xzcat "$image_file" | dd of="$target_disk" bs=1M status=progress; then
log_message "Image restored successfully"
return 0
fi
;;
"img")
if dd if="$image_file" of="$target_disk" bs=1M status=progress; then
log_message "Image restored successfully"
return 0
fi
;;
esac

log_message "ERROR: Image restore failed"
return 1
}

# Main execution
mkdir -p "$BACKUP_DIR"

case "${1:-backup}" in
"backup")
if [ -z "$2" ]; then
echo "Usage: $0 backup <source_disk>"
exit 1
fi
create_disk_image "$2"
;;
"restore")
if [ -z "$2" ] || [ -z "$3" ]; then
echo "Usage: $0 restore <image_file> <target_disk>"
exit 1
fi
restore_image "$2" "$3"
;;
"verify")
if [ -z "$2" ] || [ -z "$3" ]; then
echo "Usage: $0 verify <image_file> <original_disk>"
exit 1
fi
verify_image "$2" "$3"
;;
*)
echo "Usage: $0 {backup <disk>|restore <image> <disk>|verify <image> <disk>}"
exit 1
;;
esac
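
Individual files can often be recovered from an uncompressed image without writing it back to a disk, by attaching it as a read-only loop device. A minimal sketch, assuming a raw image of a partitioned disk (the image name is an example):

# Attach the image read-only and expose its partitions
losetup -fP --show --read-only /backup/images/sdb-20240101_020000.img
# prints e.g. /dev/loop0; partitions appear as /dev/loop0p1, /dev/loop0p2, ...

mount -o ro /dev/loop0p1 /mnt/image
# copy out the needed files, then clean up
umount /mnt/image
losetup -d /dev/loop0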

Cloud Backup Solutions

AWS S3 Backup Integration

#!/bin/bash
# s3-backup.sh - AWS S3 backup integration

# Configuration
AWS_PROFILE="backup"
S3_BUCKET="company-backups"
LOCAL_BACKUP_DIR="/backup"
ENCRYPTION="AES256"
STORAGE_CLASS="STANDARD_IA" # STANDARD, STANDARD_IA, GLACIER, DEEP_ARCHIVE
LOG_FILE="/var/log/s3-backup.log"

# Ensure AWS CLI is configured
export AWS_PROFILE="$AWS_PROFILE"

log_message() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

sync_to_s3() {
local local_path=$1
local s3_path="s3://$S3_BUCKET/$(hostname)/"

log_message "Syncing $local_path to S3..."

if aws s3 sync "$local_path" "$s3_path" \
--storage-class "$STORAGE_CLASS" \
--server-side-encryption "$ENCRYPTION" \
--delete \
--exclude "*.tmp" \
--exclude "*.log"; then

log_message "S3 sync completed successfully"
return 0
else
log_message "ERROR: S3 sync failed"
return 1
fi
}

upload_to_s3() {
local file_path=$1
local s3_path="s3://$S3_BUCKET/$(hostname)/$(basename "$file_path")"

log_message "Uploading $file_path to S3..."

if aws s3 cp "$file_path" "$s3_path" \
--storage-class "$STORAGE_CLASS" \
--server-side-encryption "$ENCRYPTION"; then

log_message "Upload completed: $s3_path"
return 0
else
log_message "ERROR: Upload failed"
return 1
fi
}

restore_from_s3() {
local s3_path=$1
local local_path=$2

log_message "Restoring from S3: $s3_path"

if aws s3 cp "$s3_path" "$local_path"; then
log_message "Restore completed: $local_path"
return 0
else
log_message "ERROR: Restore failed"
return 1
fi
}

list_s3_backups() {
local s3_path="s3://$S3_BUCKET/$(hostname)/"

log_message "Listing S3 backups..."
aws s3 ls "$s3_path" --recursive --human-readable
}

# Lifecycle management
setup_lifecycle_policy() {
local policy_file="/tmp/lifecycle-policy.json"

cat > "$policy_file" << EOF
{
"Rules": [
{
"ID": "BackupLifecycle",
"Status": "Enabled",
"Filter": {"Prefix": ""},
"Transitions": [
{
"Days": 30,
"StorageClass": "GLACIER"
},
{
"Days": 365,
"StorageClass": "DEEP_ARCHIVE"
}
],
"Expiration": {
"Days": 2555
}
}
]
}
EOF

if aws s3api put-bucket-lifecycle-configuration \
--bucket "$S3_BUCKET" \
--lifecycle-configuration file://"$policy_file"; then

log_message "Lifecycle policy applied successfully"
rm -f "$policy_file"
return 0
else
log_message "ERROR: Failed to apply lifecycle policy"
rm -f "$policy_file"
return 1
fi
}

# Main execution
case "${1:-sync}" in
"sync")
sync_to_s3 "$LOCAL_BACKUP_DIR"
;;
"upload")
if [ -z "$2" ]; then
echo "Usage: $0 upload <file_path>"
exit 1
fi
upload_to_s3 "$2"
;;
"restore")
if [ -z "$2" ] || [ -z "$3" ]; then
echo "Usage: $0 restore <s3_path> <local_path>"
exit 1
fi
restore_from_s3 "$2" "$3"
;;
"list")
list_s3_backups
;;
"lifecycle")
setup_lifecycle_policy
;;
*)
echo "Usage: $0 {sync|upload <file>|restore <s3_path> <local_path>|list|lifecycle}"
exit 1
;;
esac
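
Objects that have transitioned to GLACIER or DEEP_ARCHIVE under the lifecycle policy must be temporarily restored within S3 before they can be downloaded. A minimal sketch; the bucket, key, and retrieval tier are assumptions:

# Request a temporary restore of an archived object
aws s3api restore-object \
    --bucket company-backups \
    --key "$(hostname)/backup-20240101_020000.tar.gz" \
    --restore-request '{"Days":7,"GlacierJobParameters":{"Tier":"Standard"}}'

# Poll the restore status; once complete, download with "aws s3 cp"
aws s3api head-object --bucket company-backups --key "$(hostname)/backup-20240101_020000.tar.gz"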

Backup Testing and Verification

Backup Testing Framework

#!/bin/bash
# backup-test.sh - Backup testing and verification framework

# Configuration
BACKUP_DIR="/backup"
TEST_RESTORE_DIR="/tmp/backup-test"
LOG_FILE="/var/log/backup-test.log"
EMAIL="admin@company.com"

log_message() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

test_file_backup() {
local backup_file=$1
local test_dir="$TEST_RESTORE_DIR/file-test-$$"

log_message "Testing file backup: $backup_file"

mkdir -p "$test_dir"

# Test archive integrity
case "${backup_file##*.}" in
"tar")
if tar -tf "$backup_file" > /dev/null; then
log_message "Archive integrity: PASS"
else
log_message "Archive integrity: FAIL"
return 1
fi
# Test extraction
if tar -xf "$backup_file" -C "$test_dir"; then
log_message "Extraction test: PASS"
else
log_message "Extraction test: FAIL"
return 1
fi
;;
"gz")
if tar -tzf "$backup_file" > /dev/null; then
log_message "Archive integrity: PASS"
else
log_message "Archive integrity: FAIL"
return 1
fi
if tar -xzf "$backup_file" -C "$test_dir"; then
log_message "Extraction test: PASS"
else
log_message "Extraction test: FAIL"
return 1
fi
;;
esac

# Test file count and sizes
original_files=$(tar -tf "$backup_file" | grep -cv '/$')
restored_files=$(find "$test_dir" -type f | wc -l)

log_message "File count - Original: $original_files, Restored: $restored_files"

# Cleanup
rm -rf "$test_dir"

return 0
}

test_database_backup() {
local backup_file=$1
local test_db="backup_test_$(date +%s)"

log_message "Testing database backup: $backup_file"

# MySQL backup test
if [[ "$backup_file" == *mysql* ]]; then
# Create test database
mysql -e "CREATE DATABASE $test_db;"

# Restore backup into the test database
# Note: an --all-databases dump contains CREATE DATABASE/USE statements and will ignore
# the target database; use a single-database dump for an isolated restore test
if zcat "$backup_file" | mysql "$test_db"; then
log_message "Database restore test: PASS"

# Test some basic queries
table_count=$(mysql -Bse "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='$test_db';" 2>/dev/null)
log_message "Tables restored: $table_count"

# Cleanup test database
mysql -e "DROP DATABASE $test_db;"
return 0
else
log_message "Database restore test: FAIL"
mysql -e "DROP DATABASE IF EXISTS $test_db;"
return 1
fi
fi

# PostgreSQL backup test
if [[ "$backup_file" == *postgresql* ]]; then
# Similar logic for PostgreSQL
log_message "PostgreSQL backup test not implemented"
return 0
fi

return 0
}

generate_test_report() {
local report_file="/tmp/backup-test-report-$(date +%Y%m%d).html"

cat > "$report_file" << EOF
<!DOCTYPE html>
<html>
<head>
<title>Backup Test Report - $(date +%Y-%m-%d)</title>
<style>
body { font-family: Arial, sans-serif; margin: 20px; }
.pass { color: green; font-weight: bold; }
.fail { color: red; font-weight: bold; }
.warning { color: orange; font-weight: bold; }
table { border-collapse: collapse; width: 100%; }
th, td { border: 1px solid #ddd; padding: 8px; text-align: left; }
th { background-color: #f2f2f2; }
</style>
</head>
<body>
<h1>Backup Test Report</h1>
<p><strong>Date:</strong> $(date)</p>
<p><strong>Hostname:</strong> $(hostname)</p>

<h2>Test Summary</h2>
<table>
<tr><th>Backup File</th><th>Type</th><th>Size</th><th>Status</th><th>Notes</th></tr>
EOF

# Test all backup files
local total_tests=0
local passed_tests=0

find "$BACKUP_DIR" -name "*.tar.gz" -o -name "*.sql.gz" | while read backup_file; do
total_tests=$((total_tests + 1))

# Determine backup type
if [[ "$backup_file" == *mysql* ]] || [[ "$backup_file" == *postgresql* ]]; then
backup_type="Database"
test_result="test_database_backup"
else
backup_type="File System"
test_result="test_file_backup"
fi

file_size=$(du -h "$backup_file" | cut -f1)

echo "<tr>" >> "$report_file"
echo "<td>$(basename "$backup_file")</td>" >> "$report_file"
echo "<td>$backup_type</td>" >> "$report_file"
echo "<td>$file_size</td>" >> "$report_file"

if $test_result "$backup_file"; then
echo "<td class='pass'>PASS</td>" >> "$report_file"
echo "<td>All tests passed</td>" >> "$report_file"
passed_tests=$((passed_tests + 1))
else
echo "<td class='fail'>FAIL</td>" >> "$report_file"
echo "<td>Test failed - check logs</td>" >> "$report_file"
fi

echo "</tr>" >> "$report_file"
done < <(find "$BACKUP_DIR" \( -name "*.tar.gz" -o -name "*.sql.gz" \))

cat >> "$report_file" << EOF
</table>

<h2>Test Results</h2>
<p>Total Tests: $total_tests</p>
<p>Passed: $passed_tests</p>
<p>Failed: $((total_tests - passed_tests))</p>

<h2>Log Summary</h2>
<pre>$(tail -50 "$LOG_FILE")</pre>

</body>
</html>
EOF

log_message "Test report generated: $report_file"

# Email report
if command -v mail >/dev/null 2>&1; then
{
echo "Backup test report for $(hostname)"
echo "Date: $(date)"
echo "Total tests: $total_tests"
echo "Passed: $passed_tests"
echo "Failed: $((total_tests - passed_tests))"
echo ""
echo "Full report: $report_file"
} | mail -s "Backup Test Report - $(hostname)" "$EMAIL"
fi
}

# Main execution
mkdir -p "$TEST_RESTORE_DIR"

log_message "=== Starting backup testing ==="

case "${1:-all}" in
"all")
generate_test_report
;;
"file")
if [ -z "$2" ]; then
echo "Usage: $0 file <backup_file>"
exit 1
fi
test_file_backup "$2"
;;
"database")
if [ -z "$2" ]; then
echo "Usage: $0 database <backup_file>"
exit 1
fi
test_database_backup "$2"
;;
*)
echo "Usage: $0 {all|file <backup_file>|database <backup_file>}"
exit 1
;;
esac

# Cleanup
rm -rf "$TEST_RESTORE_DIR"

log_message "Backup testing completed"

# Crontab entry for weekly testing:
# 0 3 * * 0 /usr/local/bin/backup-test.sh all
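
The framework above exercises tar archives and database restores; compressed SQL dumps can also get a quick integrity check before any restore is attempted. A minimal sketch, assuming the dump locations used earlier:

# Quick integrity check for compressed SQL dumps
for dump in /backup/mysql/*.sql.gz /backup/postgresql/*.sql.gz; do
    [ -f "$dump" ] || continue
    if gzip -t "$dump" 2>/dev/null; then
        echo "OK:   $dump"
    else
        echo "FAIL: $dump"
    fi
done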

Disaster Recovery Planning

Disaster Recovery Runbook

#!/bin/bash
# disaster-recovery.sh - Disaster recovery automation

# Configuration
BACKUP_LOCATION="/backup"
RESTORE_LOCATION="/restore"
LOG_FILE="/var/log/disaster-recovery.log"
DR_CHECKLIST="/etc/dr-checklist.txt"

log_message() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE"
}

# System assessment
assess_system_damage() {
log_message "=== System Damage Assessment ==="

# Check filesystem integrity
log_message "Checking filesystem integrity..."

# Check critical directories
critical_dirs=("/" "/etc" "/var" "/usr" "/home")
for dir in "${critical_dirs[@]}"; do
if [ -d "$dir" ]; then
log_message "✓ $dir - accessible"
else
log_message "✗ $dir - not accessible"
fi
done

# Check services
log_message "Checking critical services..."
critical_services=("ssh" "network" "systemd-logind")
for service in "${critical_services[@]}"; do
if systemctl is-active "$service" >/dev/null; then
log_message "✓ $service - running"
else
log_message "✗ $service - not running"
fi
done

# Check available backups
log_message "Checking available backups..."
if [ -d "$BACKUP_LOCATION" ]; then
backup_count=$(find "$BACKUP_LOCATION" -name "*.tar.gz" | wc -l)
log_message "Available backups: $backup_count"

latest_backup=$(find "$BACKUP_LOCATION" -name "*.tar.gz" -printf '%T@ %p\n' | sort -n | tail -1 | cut -d' ' -f2-)
if [ -n "$latest_backup" ]; then
log_message "Latest backup: $latest_backup"
fi
else
log_message "✗ Backup location not accessible"
fi
}

# Restore critical systems
restore_critical_systems() {
log_message "=== Restoring Critical Systems ==="

mkdir -p "$RESTORE_LOCATION"

# Find latest system backup
latest_backup=$(find "$BACKUP_LOCATION" -name "backup-*.tar.gz" -printf '%T@ %p\n' | sort -n | tail -1 | cut -d' ' -f2-)

if [ -n "$latest_backup" ]; then
log_message "Restoring from: $latest_backup"

# Extract backup
if tar -xzf "$latest_backup" -C "$RESTORE_LOCATION"; then
log_message "Backup extracted successfully"

# Restore critical configuration files
if [ -d "$RESTORE_LOCATION/etc" ]; then
log_message "Restoring /etc configuration..."
cp -r "$RESTORE_LOCATION/etc/"* /etc/ 2>/dev/null
fi

# Restore user data
if [ -d "$RESTORE_LOCATION/home" ]; then
log_message "Restoring /home data..."
cp -r "$RESTORE_LOCATION/home/"* /home/ 2>/dev/null
fi

return 0
else
log_message "ERROR: Failed to extract backup"
return 1
fi
else
log_message "ERROR: No backup found for restoration"
return 1
fi
}

# Restore databases
restore_databases() {
log_message "=== Restoring Databases ==="

# MySQL restoration
mysql_backup=$(find "$BACKUP_LOCATION" -name "mysql-*.sql.gz" -printf '%T@ %p\n' | sort -n | tail -1 | cut -d' ' -f2-)
if [ -n "$mysql_backup" ]; then
log_message "Restoring MySQL from: $mysql_backup"

if zcat "$mysql_backup" | mysql; then
log_message "MySQL restoration completed"
else
log_message "ERROR: MySQL restoration failed"
fi
fi

# PostgreSQL restoration
pg_backup=$(find "$BACKUP_LOCATION" -name "pg-*.sql.gz" -printf '%T@ %p\n' | sort -n | tail -1 | cut -d' ' -f2-)
if [ -n "$pg_backup" ]; then
log_message "Restoring PostgreSQL from: $pg_backup"

if zcat "$pg_backup" | psql; then
log_message "PostgreSQL restoration completed"
else
log_message "ERROR: PostgreSQL restoration failed"
fi
fi
}

# Verify system integrity
verify_restoration() {
log_message "=== Verifying System Restoration ==="

# Check critical services
critical_services=("ssh" "nginx" "mysql" "postgresql")
for service in "${critical_services[@]}"; do
if systemctl is-active "$service" >/dev/null 2>&1; then
log_message "✓ $service - running"
else
log_message "⚠ $service - not running (attempting to start)"
systemctl start "$service" 2>/dev/null
fi
done

# Check filesystem integrity
log_message "Running filesystem check..."
# -M skips mounted filesystems (never fsck a mounted filesystem)
fsck -A -M -y 2>&1 | tee -a "$LOG_FILE"

# Check disk space
log_message "Checking disk space..."
df -h | tee -a "$LOG_FILE"

# Verify network connectivity
log_message "Testing network connectivity..."
if ping -c 3 8.8.8.8 >/dev/null 2>&1; then
log_message "✓ Network connectivity - OK"
else
log_message "✗ Network connectivity - FAILED"
fi
}

# Generate DR report
generate_dr_report() {
local report_file="/tmp/dr-report-$(date +%Y%m%d_%H%M%S).txt"

cat > "$report_file" << EOF
Disaster Recovery Report
========================
Date: $(date)
Hostname: $(hostname)
Recovery Initiated: $(head -1 "$LOG_FILE")

System Status:
$(systemctl --failed --no-legend)

Disk Usage:
$(df -h)

Network Status:
$(ip addr show | grep inet)

Service Status:
$(systemctl list-units --type=service --state=active | grep -v "^$")

Recovery Log Summary:
$(tail -50 "$LOG_FILE")
EOF

log_message "DR report generated: $report_file"

# Email report if possible
if command -v mail >/dev/null 2>&1; then
mail -s "Disaster Recovery Report - $(hostname)" admin@company.com < "$report_file"
fi
}

# Main DR execution
case "${1:-full}" in
"assess")
assess_system_damage
;;
"restore")
restore_critical_systems
restore_databases
;;
"verify")
verify_restoration
;;
"full")
log_message "=== DISASTER RECOVERY INITIATED ==="
assess_system_damage
restore_critical_systems
restore_databases
verify_restoration
generate_dr_report
log_message "=== DISASTER RECOVERY COMPLETED ==="
;;
*)
echo "Usage: $0 {assess|restore|verify|full}"
exit 1
;;
esac
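
The DR_CHECKLIST file referenced in the configuration is not read by the script itself; it is meant as a human runbook kept alongside it. An illustrative example of what /etc/dr-checklist.txt might contain (steps and targets are assumptions):

# /etc/dr-checklist.txt (illustrative)
# 1. Declare the incident and notify stakeholders (example targets: RTO 4h, RPO 24h)
# 2. Run: disaster-recovery.sh assess
# 3. Provision replacement hardware or a VM if required
# 4. Run: disaster-recovery.sh restore
# 5. Run: disaster-recovery.sh verify
# 6. Validate applications with service owners
# 7. Re-enable monitoring and scheduled backups; document lessons learned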

This tutorial provides a comprehensive guide to system backup strategies, from basic file backups through advanced database backups, cloud integration, backup testing, and disaster recovery planning. This completes all 10 tutorials in the System Administration series!