import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential, Model, datasets, optimizers
# Custom preprocessing function
def preprocess(x, y):
    # Called automatically with an image batch x and a label batch y,
    # with shapes [b, 32, 32, 3] and [b] respectively
    # Scale pixel values from [0, 255] to [-1, 1]
    x = 2 * tf.cast(x, dtype=tf.float32) / 255. - 1
    # Cast the labels to an integer tensor
    y = tf.cast(y, dtype=tf.int32)
    # The returned x, y replace the incoming x, y, which implements the preprocessing
    return x, y
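# Optional sanity-check sketch: run a dummy uint8 batch through preprocess and
# confirm the pixel values land in [-1, 1]. The names x_demo / y_demo are
# illustrative only and do not appear elsewhere in the script.
x_demo = tf.cast(tf.random.uniform([4, 32, 32, 3], maxval=256, dtype=tf.int32), tf.uint8)
y_demo = tf.zeros([4], dtype=tf.int64)
x_demo, y_demo = preprocess(x_demo, y_demo)
print('preprocess range:', float(tf.reduce_min(x_demo)), float(tf.reduce_max(x_demo)))  # roughly -1.0 and 1.0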
# Download (if necessary) and load the CIFAR10 dataset
(x, y), (x_test, y_test) = datasets.cifar10.load_data()
# Remove the redundant dimension of y: [b, 1] -> [b]
y = tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test, axis=1)
# Print the shapes of the training and test sets
# print(x.shape, y.shape, x_test.shape, y_test.shape)
# Build the training Dataset: shuffle, preprocess, batch
train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(512)
# Build the test Dataset: preprocess, batch
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(512)
# Sample one batch from the training set and inspect it
sample = next(iter(train_db))
# print('sample:', sample[0].shape, sample[1].shape, tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))
class BasicBlock(layers.Layer):
    # Residual block
    def __init__(self, filter_num, stride=1):
        super(BasicBlock, self).__init__()
        # First convolution unit
        self.conv1 = layers.Conv2D(filter_num, kernel_size=(3, 3), strides=stride, padding='same')
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')
        # Second convolution unit
        self.conv2 = layers.Conv2D(filter_num, kernel_size=(3, 3), strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()
        if stride != 1:
            # Match the shortcut shape with a 1x1 convolution
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num, kernel_size=(1, 1), strides=stride))
        else:
            # Shapes already match, connect the shortcut directly
            self.downsample = lambda x: x

    def call(self, inputs, training=None):
        # Forward pass
        # [b, h, w, c], through the first convolution unit
        out = self.conv1(inputs)
        out = self.bn1(out)
        out = self.relu(out)
        # Through the second convolution unit
        out = self.conv2(out)
        out = self.bn2(out)
        # Through the identity (shortcut) branch
        identity = self.downsample(inputs)
        # Add the outputs of the two branches element-wise
        output = layers.add([out, identity])
        output = tf.nn.relu(output)
        return output
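# Optional shape-check sketch: with stride=2 the 1x1 convolution in downsample
# halves the spatial size of the shortcut so it can be added to the main branch.
# block_demo is an illustrative name used only for this check.
block_demo = BasicBlock(128, stride=2)
print('BasicBlock output:', block_demo(tf.random.normal([4, 32, 32, 64])).shape)  # expected (4, 16, 16, 128)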
class ResNet(Model):
    def __init__(self, layer_dims, num_classes=10):  # e.g. layer_dims = [2, 2, 2, 2]
        super(ResNet, self).__init__()
        # Stem network that preprocesses the input
        self.stem = Sequential([
            layers.Conv2D(64, kernel_size=(3, 3), strides=(1, 1)),
            layers.BatchNormalization(),
            layers.Activation('relu'),
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
        ])
        # Stack 4 blocks; each block contains several BasicBlocks and uses a different stride
        self.layer1 = self.build_resblock(64, layer_dims[0])
        self.layer2 = self.build_resblock(128, layer_dims[1], stride=2)
        self.layer3 = self.build_resblock(256, layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(512, layer_dims[3], stride=2)
        # Reduce the spatial dimensions to 1x1 with global average pooling
        self.avgpool = layers.GlobalAveragePooling2D()
        # Final fully connected layer for classification
        self.fc = layers.Dense(num_classes)

    def build_resblock(self, filter_num, blocks, stride=1):
        # Helper that stacks `blocks` BasicBlocks, each with filter_num channels
        res_blocks = Sequential()
        # Only the first BasicBlock may have stride != 1, which performs the down-sampling
        res_blocks.add(BasicBlock(filter_num, stride))
        # All remaining BasicBlocks use stride 1
        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))
        return res_blocks

    def call(self, inputs, training=None):
        # Forward pass: through the stem network
        x = self.stem(inputs)
        # Then through the 4 blocks in turn
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Through the pooling layer
        x = self.avgpool(x)
        # Through the fully connected layer
        x = self.fc(x)
        return x
def resnet18():
    # Different ResNets are obtained by changing the number and configuration of BasicBlocks per block
    return ResNet([2, 2, 2, 2])

# def resnet34():
#     # Different ResNets are obtained by changing the number and configuration of BasicBlocks per block
#     return ResNet([3, 4, 6, 3])

model = resnet18()  # ResNet18 network
model.build(input_shape=(None, 32, 32, 3))
# model.summary()  # Print the network parameters
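# Optional forward-pass sketch: a random image batch should produce one logit
# per CIFAR10 class, i.e. shape [b, 10].
print('logits shape:', model(tf.random.normal([4, 32, 32, 3])).shape)  # expected (4, 10)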
def main():
    optimizer = optimizers.Adam(learning_rate=1e-4)
    for epoch in range(10):
        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 10]
                logits = model(x)
                y_onehot = tf.one_hot(y, depth=10)
                # Compute the cross-entropy loss
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)
            # Compute gradients with respect to all trainable parameters
            grads = tape.gradient(loss, model.trainable_variables)
            # Apply the parameter update
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            if step % 10 == 0:
                print(epoch, step, 'loss:', float(loss))

        # Evaluate accuracy on the test set after each epoch
        total_num = 0
        total_correct = 0
        for x, y in test_db:
            logits = model(x)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)
            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)
            total_num += x.shape[0]
            total_correct += int(correct)
        acc = total_correct / total_num
        print(epoch, 'acc:', acc)


if __name__ == '__main__':
    main()