Computing a Model's Params and GFLOPs

import torch
import torch.nn as nn


class FP_Conv2d(nn.Module):
    """Conv2d + BatchNorm + ReLU block, with optional dropout and an optional leading ReLU."""
    def __init__(self, input_channels, output_channels,
                 kernel_size=-1, stride=-1, padding=-1, dropout=0,
                 groups=1, channel_shuffle=0, shuffle_groups=1, last=0, first=0):
        super(FP_Conv2d, self).__init__()
        self.dropout_ratio = dropout
        self.last = last
        self.first_flag = first
        if dropout != 0:
            self.dropout = nn.Dropout(dropout)
        self.conv = nn.Conv2d(input_channels, output_channels,
                              kernel_size=kernel_size, stride=stride,
                              padding=padding, groups=groups)
        self.bn = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        if self.first_flag:           # apply ReLU before the conv for the first block
            x = self.relu(x)
        if self.dropout_ratio != 0:
            x = self.dropout(x)
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x


class Net(nn.Module):
    """NIN-style network for 32x32 inputs (e.g. CIFAR-10)."""
    def __init__(self, cfg=None):
        super(Net, self).__init__()
        if cfg is None:
            cfg = [192, 160, 96, 192, 192, 192, 192, 192]
        self.tnn_bin = nn.Sequential(
                nn.Conv2d(3, cfg[0], kernel_size=5, stride=1, padding=2),  # 3 input channels, cfg[0] output channels
                nn.BatchNorm2d(cfg[0]),
                FP_Conv2d(cfg[0], cfg[1], kernel_size=1, stride=1, padding=0, first=1),
                FP_Conv2d(cfg[1], cfg[2], kernel_size=1, stride=1, padding=0),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
                FP_Conv2d(cfg[2], cfg[3], kernel_size=5, stride=1, padding=2),
                FP_Conv2d(cfg[3], cfg[4], kernel_size=1, stride=1, padding=0),
                FP_Conv2d(cfg[4], cfg[5], kernel_size=1, stride=1, padding=0),
                nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
                FP_Conv2d(cfg[5], cfg[6], kernel_size=3, stride=1, padding=1),
                FP_Conv2d(cfg[6], cfg[7], kernel_size=1, stride=1, padding=0),
                nn.Conv2d(cfg[7], 10, kernel_size=1, stride=1, padding=0),  # 10-way classifier head
                nn.BatchNorm2d(10),
                nn.ReLU(inplace=True),
                nn.AvgPool2d(kernel_size=8, stride=1, padding=0),  # global average pooling over the 8x8 feature map
                )

    def forward(self, x):
        x = self.tnn_bin(x)
        # x = self.dorefa(x)
        x = x.view(x.size(0), 10)  # flatten the 1x1 feature map to (batch_size, 10) logits
        return x
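As a quick sanity check before profiling (a minimal sketch, assuming CIFAR-10-sized 32x32 RGB inputs), instantiate the network and confirm that it produces a (batch_size, 10) output:

# Quick forward-pass check on a random 32x32 RGB batch (CIFAR-10 sized).
import torch

net = Net()
dummy = torch.randn(2, 3, 32, 32)   # batch of 2 random images
out = net(dummy)
print(out.shape)                    # expected: torch.Size([2, 10])

The final AvgPool2d(kernel_size=8) assumes the feature map has been downsampled to 8x8 by the two stride-2 pooling layers, which is why the 32x32 input size matters here.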

You need thop installed: pip install thop

If that fails, install the latest version directly from GitHub: pip install --upgrade git+https://github.com/Lyken17/pytorch-OpCounter.git
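Whichever install route you take, a quick import in a Python shell confirms that thop is available:

# Verify the installation: importing profile should succeed without errors.
from thop import profile
print(profile)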

# PyTorch: compute a model's Params and GFLOPs with thop
import torch
from thop import profile
from models import nin

model = nin.Net()
# checkpoint = torch.load('models_save/nin.pth', map_location='cpu')
checkpoint = torch.load('models_save/nin_preprune.pth', map_location='cpu')
model.load_state_dict(checkpoint['state_dict'])

input = torch.randn(1, 3, 32, 32)  # input shape the model expects, batch_size=1
flops, params = profile(model, inputs=(input,))
print("GFLOPs: {:.2f}, Params: {:.2f}M".format(flops / 1e9, params / 1e6))  # FLOPs in G, params in M
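Two small notes: thop's profile is documented as returning a multiply-accumulate (MAC) count, which some papers double when reporting FLOPs, and the parameter total can be cross-checked directly from the model without thop. A minimal sketch, reusing the model, flops, and params variables from the script above:

# Cross-check the parameter count straight from the model's tensors;
# it should closely match the 'params' value reported by thop.
total_params = sum(p.numel() for p in model.parameters())
print("Params (direct count): {:.2f}M".format(total_params / 1e6))

# thop also ships clever_format for human-readable output of the raw numbers.
from thop import clever_format
flops_str, params_str = clever_format([flops, params], "%.3f")
print(flops_str, params_str)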

 

 
