Python: concurrently computing the total size of S3 directories (python 并发统计 S3 目录大小)

复制代码
import os
import subprocess
from multiprocessing.pool import Pool

import boto3

# One S3 client shared by the whole script; credentials and region come
# from the standard boto3 configuration chain (env vars, ~/.aws, etc.).
client = boto3.client('s3')
response = client.list_buckets()
buckets = response['Buckets']

# Just the bucket names — this is what the pool at the bottom fans out over.
bucket_list = [bucket_info['Name'] for bucket_info in buckets]


# List the top-level entries of a bucket via the AWS CLI. Both common
# prefixes ("directories") and top-level objects are included; filter the
# result if only prefixes are wanted.
def get_catalog_list(bucket='bucket'):
    """Return a list of "s3://<bucket>/<entry>" URIs for the bucket's
    top-level prefixes and objects.

    Requires the ``aws`` CLI to be installed and configured. ``aws s3 ls``
    emits two kinds of lines: "PRE <prefix>/" for common prefixes, and
    "<date> <time> <size> <key>" for objects.
    """
    # Argument list + shell=False so the bucket name can never be
    # interpreted by a shell (the original os.popen shell string could).
    result = subprocess.run(
        ["aws", "s3", "ls", "s3://{bucket}".format(bucket=bucket)],
        capture_output=True, text=True, check=False,
    )
    s3_key_list = []
    for line in result.stdout.split('\n'):
        line = line.lstrip()
        if not line:
            continue
        if line.startswith('PRE '):
            # "PRE <prefix>/" -> keep the prefix, trailing slash included.
            key = line[4:]
        else:
            # Object line: the key is everything after the third field.
            # (The original sliced [4:] unconditionally, which chopped the
            # date on object lines and produced garbage URIs.)
            parts = line.split(None, 3)
            key = parts[3] if len(parts) == 4 else line
        s3_key_list.append("s3://{bucket}/{key}".format(bucket=bucket, key=key))
    print(s3_key_list)
    return s3_key_list


# Get the total stored size under an S3 prefix.
def get_catalog_size(catalog='s3://bucket/key/'):
    """Return the total size, in whole MiB (truncated), of all objects
    under the given ``s3://`` prefix, via ``aws s3 ls --recursive``.

    Requires the ``aws`` CLI to be installed and configured.
    """
    print("aws s3 ls {catalog} --recursive ".format(catalog=catalog))
    # Argument list + shell=False: the prefix is passed verbatim instead of
    # being interpolated into a shell command string.
    result = subprocess.run(
        ["aws", "s3", "ls", catalog, "--recursive"],
        capture_output=True, text=True, check=False,
    )
    total_bytes = 0
    for line in result.stdout.split('\n'):
        fields = line.split()
        # Object lines look like "<date> <time> <size> <key>". Skip blank
        # or malformed lines instead of raising IndexError (the original
        # indexed [2] unconditionally).
        if len(fields) >= 3:
            total_bytes += int(fields[2])
    return int(total_bytes / 1024 / 1024)


def save(bucket='bucket'):
    """Append one "<catalog>,<size-in-MB>" line per top-level entry of
    *bucket* to a file named "<bucket>.txt" in the working directory."""
    out_path = bucket + '.txt'
    # Append mode: repeated runs accumulate rather than overwrite.
    with open(out_path, "a") as out:
        for entry in get_catalog_list(bucket):
            record = "{0},{1}\n".format(entry, get_catalog_size(entry))
            print(record)
            out.write(record)


if __name__ == '__main__':
    # Fan one save() task per bucket out over up to 8 worker processes.
    pool = Pool(8)
    for bucket in bucket_list:
        pool.apply_async(save, args=(bucket,))
    # Bug fix: Pool.join() may only be called after close()/terminate().
    # The original called join() first, which raises ValueError, so the
    # workers were never actually waited on.
    pool.close()
    pool.join()
复制代码

 

posted @   Mars.wang  阅读(672)  评论(1编辑  收藏  举报
编辑推荐:
· AI与.NET技术实操系列:基于图像分类模型对图像进行分类
· go语言实现终端里的倒计时
· 如何编写易于单元测试的代码
· 10年+ .NET Coder 心语,封装的思维:从隐藏、稳定开始理解其本质意义
· .NET Core 中如何实现缓存的预热?
阅读排行:
· 25岁的心里话
· 闲置电脑爆改个人服务器(超详细) #公网映射 #Vmware虚拟网络编辑器
· 零经验选手,Compose 一天开发一款小游戏!
· 通过 API 将Deepseek响应流式内容输出到前端
· AI Agent开发,如何调用三方的API Function,是通过提示词来发起调用的吗
点击右上角即可分享
微信分享提示