ResNet18 model

Hello everyone, good to see you again! I'm your friend, 全栈君.

Structure

Printing the model with print(model) yields the output below. Note that this variant uses a 3x3, stride-1 stem convolution and no max-pool, which suits small inputs such as 32x32 images; the stage layout of four stages with two basic blocks each otherwise matches the standard ResNet-18.

ResNet18(
  (conv1): Conv2D(3, 64, kernel_size=[3, 3], padding=1, data_format=NCHW)
  (bn1): BatchNorm2D(num_features=64, momentum=0.9, epsilon=1e-05)
  (relu): ReLU()
  (avagPool): AdaptiveAvgPool2D(output_size=1)
  (classifier): Linear(in_features=512, out_features=1000, dtype=float32)
  (layer1): Sequential(
    (0): Block(
      (conv1): Conv2D(64, 64, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn1): BatchNorm2D(num_features=64, momentum=0.9, epsilon=1e-05)
      (conv2): Conv2D(64, 64, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn2): BatchNorm2D(num_features=64, momentum=0.9, epsilon=1e-05)
      (relu): ReLU()
      (downsample): Identity()
    )
    (1): Block(
      (conv1): Conv2D(64, 64, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn1): BatchNorm2D(num_features=64, momentum=0.9, epsilon=1e-05)
      (conv2): Conv2D(64, 64, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn2): BatchNorm2D(num_features=64, momentum=0.9, epsilon=1e-05)
      (relu): ReLU()
      (downsample): Identity()
    )
  )
  (layer2): Sequential(
    (0): Block(
      (conv1): Conv2D(64, 128, kernel_size=[3, 3], stride=[2, 2], padding=1, data_format=NCHW)
      (bn1): BatchNorm2D(num_features=128, momentum=0.9, epsilon=1e-05)
      (conv2): Conv2D(128, 128, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn2): BatchNorm2D(num_features=128, momentum=0.9, epsilon=1e-05)
      (relu): ReLU()
      (downsample): Sequential(
        (0): Conv2D(64, 128, kernel_size=[1, 1], stride=[2, 2], data_format=NCHW)
        (1): BatchNorm2D(num_features=128, momentum=0.9, epsilon=1e-05)
      )
    )
    (1): Block(
      (conv1): Conv2D(128, 128, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn1): BatchNorm2D(num_features=128, momentum=0.9, epsilon=1e-05)
      (conv2): Conv2D(128, 128, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn2): BatchNorm2D(num_features=128, momentum=0.9, epsilon=1e-05)
      (relu): ReLU()
      (downsample): Identity()
    )
  )
  (layer3): Sequential(
    (0): Block(
      (conv1): Conv2D(128, 256, kernel_size=[3, 3], stride=[2, 2], padding=1, data_format=NCHW)
      (bn1): BatchNorm2D(num_features=256, momentum=0.9, epsilon=1e-05)
      (conv2): Conv2D(256, 256, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn2): BatchNorm2D(num_features=256, momentum=0.9, epsilon=1e-05)
      (relu): ReLU()
      (downsample): Sequential(
        (0): Conv2D(128, 256, kernel_size=[1, 1], stride=[2, 2], data_format=NCHW)
        (1): BatchNorm2D(num_features=256, momentum=0.9, epsilon=1e-05)
      )
    )
    (1): Block(
      (conv1): Conv2D(256, 256, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn1): BatchNorm2D(num_features=256, momentum=0.9, epsilon=1e-05)
      (conv2): Conv2D(256, 256, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn2): BatchNorm2D(num_features=256, momentum=0.9, epsilon=1e-05)
      (relu): ReLU()
      (downsample): Identity()
    )
  )
  (layer4): Sequential(
    (0): Block(
      (conv1): Conv2D(256, 512, kernel_size=[3, 3], stride=[2, 2], padding=1, data_format=NCHW)
      (bn1): BatchNorm2D(num_features=512, momentum=0.9, epsilon=1e-05)
      (conv2): Conv2D(512, 512, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn2): BatchNorm2D(num_features=512, momentum=0.9, epsilon=1e-05)
      (relu): ReLU()
      (downsample): Sequential(
        (0): Conv2D(256, 512, kernel_size=[1, 1], stride=[2, 2], data_format=NCHW)
        (1): BatchNorm2D(num_features=512, momentum=0.9, epsilon=1e-05)
      )
    )
    (1): Block(
      (conv1): Conv2D(512, 512, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn1): BatchNorm2D(num_features=512, momentum=0.9, epsilon=1e-05)
      (conv2): Conv2D(512, 512, kernel_size=[3, 3], padding=1, data_format=NCHW)
      (bn2): BatchNorm2D(num_features=512, momentum=0.9, epsilon=1e-05)
      (relu): ReLU()
      (downsample): Identity()
    )
  )
)
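Where does the "18" come from? Count the weight layers: 1 stem convolution, 2 convolutions in each of the 8 blocks, and the final fully connected layer, for 17 + 1 = 18 in total (the 1x1 shortcut convolutions are conventionally not counted). To see per-layer output shapes and parameter counts, you can use Paddle's built-in summary; a minimal sketch, assuming Paddle 2.x and the ResNet18 class from the Code section below:

import paddle

model = ResNet18()  # defined in the Code section below
paddle.summary(model, (1, 3, 32, 32))  # prints per-layer output shapes and parameter counts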

Code

The full PaddlePaddle implementation:

import paddle
import paddle.nn as nn


class Identity(nn.Layer):
    """No-op layer used as the shortcut when no projection is needed."""
    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x


class Block(nn.Layer):
    """Basic residual block: two 3x3 conv+BN pairs plus a shortcut connection."""
    def __init__(self, in_dim, out_dim, stride):
        super().__init__()
        self.conv1 = nn.Conv2D(in_dim, out_dim, 3, stride, 1, bias_attr=False)
        self.bn1 = nn.BatchNorm2D(out_dim)
        self.conv2 = nn.Conv2D(out_dim, out_dim, 3, 1, 1, bias_attr=False)
        self.bn2 = nn.BatchNorm2D(out_dim)
        self.relu = nn.ReLU()
        # Project the shortcut with a 1x1 conv whenever the block changes the
        # spatial size (stride 2) or the channel count; otherwise pass it through.
        if stride == 2 or in_dim != out_dim:
            self.downsample = nn.Sequential(
                nn.Conv2D(in_dim, out_dim, 1, stride, bias_attr=False),
                nn.BatchNorm2D(out_dim))
        else:
            self.downsample = Identity()

    def forward(self, x):
        h = x                 # keep the input for the shortcut
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        identity = self.downsample(h)
        x = x + identity      # residual addition
        x = self.relu(x)
        return x
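Before moving on to the full network, a quick sanity check you can run at this point: a stride-2 block should halve the spatial size while the 1x1 projection keeps the shortcut compatible.

blk = Block(64, 128, stride=2)
y = blk(paddle.randn([2, 64, 32, 32]))
print(y.shape)  # [2, 128, 16, 16]: channels doubled, spatial size halved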
class ResNet18(nn.Layer):
    def __init__(self, in_dim=64, num_classes=1000):
        super().__init__()
        self.in_dim = in_dim  # easy to forget: makelayer() below keeps updating this
        # stem
        self.conv1 = nn.Conv2D(in_channels=3, out_channels=in_dim, kernel_size=3,
                               stride=1, padding=1, bias_attr=False)
        self.bn1 = nn.BatchNorm2D(in_dim)
        self.relu = nn.ReLU()
        # head
        self.avagPool = nn.AdaptiveAvgPool2D(1)
        self.classifier = nn.Linear(512, num_classes)
        # blocks: 4 stages of 2 blocks each; stages 2-4 halve the feature map
        self.layer1 = self.makelayer(64, 2, 1)
        self.layer2 = self.makelayer(128, 2, 2)
        self.layer3 = self.makelayer(256, 2, 2)
        self.layer4 = self.makelayer(512, 2, 2)

    def makelayer(self, out_dim, n_blocks, stride):
        layer_list = []
        # Note: self.in_dim here is instance state, not a local. Only the first
        # block of a stage changes the width and stride; in_dim is then updated
        # so the remaining blocks keep the new width.
        layer_list.append(Block(self.in_dim, out_dim, stride))
        self.in_dim = out_dim
        for i in range(1, n_blocks):
            layer_list.append(Block(self.in_dim, out_dim, stride=1))
        return nn.Sequential(*layer_list)

    def forward(self, x):
        # stem
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # blocks
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # head
        x = self.avagPool(x)    # [N, 512, H, W] -> [N, 512, 1, 1]
        x = x.flatten(1)        # [N, 512, 1, 1] -> [N, 512]
        x = self.classifier(x)  # [N, 512] -> [N, num_classes]
        return x


def main():
    model = ResNet18()
    x = paddle.randn([2, 3, 32, 32])
    out = model(x)
    print(model)


if __name__ == "__main__":
    main()
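As a final check, a minimal sketch (assuming the script above has been run in the same session): the forward pass maps a batch of two 32x32 RGB images to two 1000-way logit vectors, and the trainable parameter count comes out to roughly 11.7 million, in line with the canonical ResNet-18.

import numpy as np

model = ResNet18()
out = model(paddle.randn([2, 3, 32, 32]))
print(out.shape)  # [2, 1000]: one logit per class for each of the 2 images

# Count trainable weights only (Paddle marks BN running mean/variance
# with stop_gradient=True, so this filter excludes them).
n_params = sum(int(np.prod(p.shape)) for p in model.parameters() if not p.stop_gradient)
print(f"trainable parameters: {n_params:,}")  # roughly 11.7 million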