FLOPs is short for floating point operations (the lowercase s marks the plural), i.e. the total number of floating-point operations, usually read as "amount of computation". It can be used to measure the complexity of an algorithm or model. (Not to be confused with FLOPS, floating point operations per second, which measures hardware speed.)
There are plenty of online write-ups on computing FLOPs by hand from the feature-map and kernel sizes, e.g. https://zhuanlan.zhihu.com/p/137719986, so that derivation is not repeated in detail here.
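Still, the per-layer formula for a convolution is short enough to sketch for reference. A minimal sketch (my own helper, not from the post above; it counts one multiply-add as 2 FLOPs, which is the convention the TF profiler uses in the report further down):

def conv2d_flops(h_out, w_out, k_h, k_w, c_in, c_out):
    # Each output element needs k_h * k_w * c_in multiply-adds,
    # and one multiply-add counts as 2 floating-point operations.
    return 2 * h_out * w_out * k_h * k_w * c_in * c_out

# First conv layer of the model below: 32x32x3 input, 32 filters of 3x3, 'same' padding,
# so the output stays 32x32.
print(conv2d_flops(32, 32, 3, 3, 3, 32))  # 1769472, i.e. the 1.77m line in the report
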
Recently I found a Python package that computes FLOPs directly (keras_flops, installable via pip install keras-flops). An example:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import regularizers
from keras_flops import get_flops  # here!
# Define a CNN model
num_classes = 10
weight_decay = 1e-4
model = Sequential()
model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=(32,32,3)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.3))
model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))
# Call the package's helper to compute the FLOPs for batch size 1
print(get_flops(model, batch_size=1))
Output:
node name | # float_ops
_TFProfRoot (--/77.71m flops)
sequential/conv2d_1/Conv2D (18.87m/18.87m flops)
sequential/conv2d_5/Conv2D (18.87m/18.87m flops)
sequential/conv2d_3/Conv2D (18.87m/18.87m flops)
sequential/conv2d_2/Conv2D (9.44m/9.44m flops)
sequential/conv2d_4/Conv2D (9.44m/9.44m flops)
sequential/conv2d/Conv2D (1.77m/1.77m flops)
sequential/batch_normalization/FusedBatchNormV3 (65.73k/65.73k flops)
sequential/batch_normalization_1/FusedBatchNormV3 (65.73k/65.73k flops)
sequential/dense/MatMul (40.96k/40.96k flops)
sequential/batch_normalization_3/FusedBatchNormV3 (33.15k/33.15k flops)
sequential/batch_normalization_2/FusedBatchNormV3 (33.15k/33.15k flops)
sequential/conv2d/BiasAdd (32.77k/32.77k flops)
sequential/conv2d_1/BiasAdd (32.77k/32.77k flops)
sequential/max_pooling2d/MaxPool (32.77k/32.77k flops)
sequential/batch_normalization_4/FusedBatchNormV3 (17.15k/17.15k flops)
sequential/batch_normalization_5/FusedBatchNormV3 (17.15k/17.15k flops)
sequential/conv2d_3/BiasAdd (16.38k/16.38k flops)
sequential/max_pooling2d_1/MaxPool (16.38k/16.38k flops)
sequential/conv2d_2/BiasAdd (16.38k/16.38k flops)
sequential/conv2d_5/BiasAdd (8.19k/8.19k flops)
sequential/conv2d_4/BiasAdd (8.19k/8.19k flops)
sequential/max_pooling2d_2/MaxPool (8.19k/8.19k flops)
sequential/dense/Softmax (50/50 flops)
sequential/dense/BiasAdd (10/10 flops)
======================End of Report==========================
77712060
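The layer-by-layer report above is printed by the TF profiler as a side effect; the trailing 77712060 is the value get_flops actually returns (the 77.71m total shown at _TFProfRoot, and consistent with the manual formula sketched earlier). Since it is returned as a plain number, it can be captured and formatted into friendlier units, e.g.:

flops = get_flops(model, batch_size=1)
print(f"FLOPs: {flops / 10 ** 6:.4g} M")  # FLOPs: 77.71 M
print(f"FLOPs: {flops / 10 ** 9:.4g} G")  # FLOPs: 0.07771 G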