ResNet18 in TensorFlow (Explained Simply)


1. ResNet code

# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 19:38:01 2020

@author: HongyongHan
"""

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential

class BasicBlock(layers.Layer):
    # Constructor
    # filter_num: number of convolution filters, i.e. the number of output channels
    # strides=1 means the feature map is not downsampled
    def __init__(self, filter_num, strides=1):
        # Call the parent class constructor
        super(BasicBlock, self).__init__()
        # filter_num: number of output channels; (3, 3): kernel size
        # With padding='same' and strides=1, the output size equals the input size.
        # With strides >= 2, padding='same' pads the input so that the output size
        # is ceil(input_size / strides); e.g. with strides=2 a 32x32 input
        # produces a 16x16 output.


        self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=strides, padding='same')
        self.bn1 = layers.BatchNormalization()
        # Non-linear activation
        self.relu = layers.Activation('relu')

        # strides=1 here, so the second conv always preserves the spatial size
        self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()

        if strides != 1:
            # Downsample the shortcut with a 1x1 conv so the two branches match
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=strides))
        else:
            # Identity shortcut
            self.downsample = lambda x: x



    def call(self, inputs, training=None):
        # inputs: [b, h, w, c]
        out = self.conv1(inputs)
        out = self.bn1(out, training=training)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out, training=training)

        # Shortcut branch: identity, or a 1x1 conv when downsampling
        identity = self.downsample(inputs)
        output = layers.add([out, identity])
        # Final ReLU via the tf functional API
        output = tf.nn.relu(output)

        return output


class ResNet(keras.Model):
    def __init__(self, layer_dims, num_classes=100):
        # layer_dims: for ResNet-18 this is [2, 2, 2, 2], i.e. four res blocks,
        # each containing two BasicBlocks; the list gives the number of
        # BasicBlocks per res block and is consumed later by build_resblock
        # num_classes=100: the number of output classes
        super(ResNet, self).__init__()

        # Preprocessing (stem) layer
        self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
                                layers.BatchNormalization(),
                                layers.Activation('relu'),
                                layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
                                ])
        # Create the four res blocks, matching layer_dims=[2, 2, 2, 2];
        # layer_dims[i] is the number of BasicBlocks in block i.
        # strides=2 halves the feature map, so it shrinks from block to block.
        self.layer1 = self.build_resblock(64, layer_dims[0])
        self.layer2 = self.build_resblock(128, layer_dims[1], strides=2)
        self.layer3 = self.build_resblock(256, layer_dims[2], strides=2)
        self.layer4 = self.build_resblock(512, layer_dims[3], strides=2)

        # out: [b, h, w, 512]
        # The exact h and w are not known at this point, so use adaptive pooling:
        # GlobalAveragePooling2D averages over h and w within each channel,
        # whatever their size, producing a [b, 512] tensor.
        self.avgpool = layers.GlobalAveragePooling2D()
        # Fully connected classification layer; outputs num_classes logits
        self.fc = layers.Dense(num_classes)



    def call(self, inputs, training=None):
        # Forward pass
        x = self.stem(inputs, training=training)
        x = self.layer1(x, training=training)
        x = self.layer2(x, training=training)
        x = self.layer3(x, training=training)
        x = self.layer4(x, training=training)
        # Global average pooling already yields shape [b, c]; no reshape needed
        x = self.avgpool(x)
        # Output shape is [b, 100]
        x = self.fc(x)

        return x



    def build_resblock(self, filter_num, blocks, strides=1):
        res_blocks = Sequential()
        # The first BasicBlock may downsample (strides > 1)
        res_blocks.add(BasicBlock(filter_num, strides))
        # The remaining blocks use strides=1, so only the first block
        # downsamples and the rest keep the shape unchanged
        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, strides=1))
        return res_blocks

def resnet18():
    return ResNet([2, 2, 2, 2])

def resnet34():
    return ResNet([3, 4, 6, 3])
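To make sure the network is wired correctly, a minimal sanity check (assuming the file above is saved as ResNet.py, as the training script below also does) builds the model and pushes a dummy batch through it:

import tensorflow as tf
from ResNet import resnet18

model = resnet18()
model.build(input_shape=(None, 32, 32, 3))
model.summary()

# Four random 32x32 RGB images should map to a [4, 100] logits tensor
x = tf.random.normal([4, 32, 32, 3])
out = model(x, training=False)
print(out.shape)  # (4, 100)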

2. resnet18_train

# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 19:38:40 2020

@author: HongyongHan
"""

import tensorflow as tf
from tensorflow.keras import optimizers, datasets
import os
from ResNet import resnet18
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.random.set_seed(2345)



# Preprocessing: scale pixels to [-1, 1] and cast labels to int
def preprocess(x, y):
    # Map [0, 255] to [-1, 1]
    x = 2 * tf.cast(x, dtype=tf.float32) / 255. - 1
    y = tf.cast(y, dtype=tf.int32)
    return x, y

(x, y), (x_test, y_test) = datasets.cifar100.load_data()
# The labels come back with shape (50000, 1) / (10000, 1); squeeze the extra axis
y = tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test, axis=1)
print(x.shape, y.shape, x_test.shape, y_test.shape)

train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(64)

test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(64)

# Sanity check: the pixel range should now be [-1, 1]
sample = next(iter(train_db))
print('sample', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))

def main():
    # [b, 32, 32, 3] => [b, 100]
    model = resnet18()
    model.build(input_shape=(None, 32, 32, 3))
    optimizer = optimizers.Adam(learning_rate=1e-3)
    for epoch in range(50):
        for step, (x, y) in enumerate(train_db):
            # Record the forward pass on the tape so gradients can be computed
            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 100]
                # training=True so BatchNormalization uses batch statistics
                logits = model(x, training=True)
                # [b] => [b, 100]
                y_onehot = tf.one_hot(y, depth=100)
                # Compute the cross-entropy loss, averaged over the batch
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)
            # Compute the gradients
            grads = tape.gradient(loss, model.trainable_variables)
            # Hand (gradient, variable) pairs to the optimizer to update the weights
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if step % 100 == 0:
                print(epoch, step, 'loss:', float(loss))

        # Evaluate on the test set after each epoch
        total_num = 0
        total_correct = 0
        for x, y in test_db:
            # training=False so BatchNormalization uses its moving statistics
            logits = model(x, training=False)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)
            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)
        acc = total_correct / total_num
        print(epoch, 'acc:', acc)

if __name__ == '__main__':
    main()
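Once training finishes, the learned weights can be persisted with the standard Keras checkpoint API. A small sketch (the file name resnet18_cifar100.ckpt is an illustrative choice, not from the original post):

# Illustrative example: persist the trained weights (path is hypothetical)
model.save_weights('resnet18_cifar100.ckpt')

# ...and restore them later into a freshly built resnet18
new_model = resnet18()
new_model.build(input_shape=(None, 32, 32, 3))
new_model.load_weights('resnet18_cifar100.ckpt')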



 
