A script to purge the segmented local cache of a large file
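The script below assumes a cache server running on the local machine (port 80 by default) that honors the HTTP PURGE method and stores large files as byte-range slices; whether PURGE is accepted depends entirely on the cache software and its configuration. A single slice can be purged by hand to verify the setup before running the whole script (a sketch; the path and byte range are placeholders):

curl -s -o /dev/null -w "%{http_code}\n" -X PURGE -r 0-10485759 "http://127.0.0.1/path/to/large-file.bin"

A 200 means the slice was purged, a 404 means that range was not in the cache — the same codes the script counts below.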

#!/bin/bash

url=$1   # URL passed on the command line
port=$2  # port passed on the command line (optional)

if [[ -z "$url" ]]; then
    echo "Usage: $0 <url> [port]"
    exit 1
fi

# Extract the URL path and query string
url_path=$(echo "$url" | cut -d '/' -f 4-)

# Check whether a port was given
if [[ -z "$port" ]]; then
    host="http://0"        # default port 80 (host 0 is shorthand for the local machine)
else
    host="http://0:$port"  # use the given port
fi

# Get the total length of the object
content_length=$(curl -sI "$host/$url_path" | grep -i Content-Length | awk -F ": " '{print $2}' | tr -d '\r')

if [[ -z "$content_length" ]]; then
    echo "Failed to retrieve Content-Length for the URL."
    exit 1
fi

# Segment size: 10 MB (10485760 bytes)
segment_size=10485760

# Total number of segments needed (ceiling division)
total_segments=$(( (content_length + segment_size - 1) / segment_size ))

success_count=0
not_found_count=0
fail_count=0

echo "Starting cache purge for $host/$url_path"

for (( i=0; i<$total_segments; i++ ))
do
    start=$(($i*$segment_size))
    end=$(($start+$segment_size-1))

    # The last valid byte index is content_length - 1
    if [[ $end -ge $content_length ]]; then
        end=$((content_length - 1))
    fi

    range="$start-$end"

    # Send a PURGE request for this byte range to the local cache
    response=$(curl -s -o /dev/null -w "%{http_code}" -X PURGE -r "$range" "$host/$url_path")

    # Progress bar (50 characters wide, so each '#' is 2%)
    percent=$(( (i + 1) * 100 / total_segments ))
    filled=$(( percent / 2 ))
    printf "\rProgress: [%-50s] %d%%" "$(printf '%*s' "$filled" '' | tr ' ' '#')" "$percent"

    # Update the counters
    case $response in
        200) let success_count++ ;;
        404) let not_found_count++ ;;
        *)   let fail_count++ ;;
    esac
done

echo -e "\nCache purge completed for $host/$url_path."
echo "Summary:"
echo "Successful purges: $success_count"
echo "Not found (no cache): $not_found_count"
echo "Failed requests: $fail_count"
