The version from the book, which is quite abstract:
from torch import nn
from torch.nn import functional as F
import os
import torchvision


class ResidualBlock(nn.Module):
    """Basic residual block: two 3x3 convolutions plus a (possibly projected) shortcut."""

    def __init__(self, inchannel, outchannel, stride=1, shortcut=None):
        super(ResidualBlock, self).__init__()
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(outchannel)
        )
        self.right = shortcut  # None means an identity shortcut

    def forward(self, x):
        out = self.left(x)
        residual = x if self.right is None else self.right(x)
        out += residual
        return F.relu(out)


class ResNetmy(nn.Module):
    def __init__(self, num_classes=1000):
        super(ResNetmy, self).__init__()
        # stem: 7x7 conv with stride 2, then 3x3 max pooling with stride 2
        self.pre = nn.Sequential(
            nn.Conv2d(3, 64, 7, 2, 3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, 1))
        self.layer1 = self._make_layer(64, 128, 3)
        self.layer2 = self._make_layer(128, 256, 4, stride=2)
        self.layer3 = self._make_layer(256, 512, 6, stride=2)
        self.layer4 = self._make_layer(512, 512, 3, stride=2)
        self.fc = nn.Linear(512, num_classes)

    def _make_layer(self, inchannel, outchannel, block_num, stride=1):
        # the first block of each stage gets a 1x1 projection shortcut;
        # the remaining blocks use identity shortcuts
        shortcut = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 1, stride, bias=False),
            nn.BatchNorm2d(outchannel)
        )
        layers = []
        layers.append(ResidualBlock(inchannel, outchannel, stride, shortcut))
        for i in range(1, block_num):
            layers.append(ResidualBlock(outchannel, outchannel))
        return nn.Sequential(*layers)

    def forward(self, x):
        print('pre:', x.size())
        x = self.pre(x)
        print(x.size())
        x = self.layer1(x)
        print(x.size())
        x = self.layer2(x)
        print(x.size())
        x = self.layer3(x)
        print(x.size())
        x = self.layer4(x)
        print(x.size())
        # a 224x224 input reaches this point as 7x7 feature maps
        x = F.avg_pool2d(x, 7)
        print(x.size())
        x = x.view(x.size(0), -1)
        return self.fc(x)


if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    model = ResNetmy()
    print(model)
    resnet34 = torchvision.models.resnet34(pretrained=False)
    print(resnet34)
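A minimal smoke test for the class above (my addition, not from the book; it assumes a standard 224x224 input, and the random batch is purely illustrative):

import torch

# hypothetical smoke test: one random 3x224x224 image
dummy = torch.randn(1, 3, 224, 224)
out = ResNetmy()(dummy)
print(out.size())  # expected: torch.Size([1, 1000])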
And here is a much more verbose version I assembled by hand:
from torch import nn
from torch.nn import functional as F
import os
import torchvision


class myRes34(nn.Module):
    def __init__(self):
        super(myRes34, self).__init__()
        # stem: 7x7 conv with stride 2, then 2x2 max pooling
        self.pre = nn.Sequential(
            nn.Conv2d(3, 64, 7, 2, 3, bias=False),
            nn.MaxPool2d(2)
        )
        self.f64conv_1 = nn.Sequential(
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        self.f64conv_2 = nn.Sequential(
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        self.f64conv_3 = nn.Sequential(
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        self.residual64_128 = nn.Sequential(
            nn.Conv2d(64, 128, 1, 2, bias=False),
            nn.BatchNorm2d(128)
        )
        self.f128conv_1 = nn.Sequential(
            nn.Conv2d(64, 128, 3, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128)
        )
        self.f128conv_2 = nn.Sequential(
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU()
        )
        self.f128conv_3 = nn.Sequential(
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU()
        )
        self.f128conv_4 = nn.Sequential(
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU()
        )
        self.residual128_256 = nn.Sequential(
            nn.Conv2d(128, 256, 1, 2, bias=False),
            nn.BatchNorm2d(256)
        )
        self.f256conv_1 = nn.Sequential(
            nn.Conv2d(128, 256, 3, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256)
        )
        self.f256conv_2 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )
        self.f256conv_3 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )
        self.f256conv_4 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )
        self.f256conv_5 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )
        self.f256conv_6 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )
        self.residual256_512 = nn.Sequential(
            nn.Conv2d(256, 512, 1, 2, bias=False),
            nn.BatchNorm2d(512)
        )
        self.f512conv_1 = nn.Sequential(
            nn.Conv2d(256, 512, 3, 2, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512)
        )
        self.f512conv_2 = nn.Sequential(
            nn.Conv2d(512, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU()
        )
        self.f512conv_3 = nn.Sequential(
            nn.Conv2d(512, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU()
        )
        self.fc = nn.Linear(512, 1000)
        self.myrelu1 = nn.ReLU()
        self.myrelu2 = nn.ReLU()
        self.myrelu3 = nn.ReLU()
    def forward(self, x):
        x = self.pre(x)
        # stage 1: identity shortcuts around each 64-channel block
        # (each hand-written block already ends in ReLU, so the addition
        # happens after the activation rather than before it)
        x = x + self.f64conv_1(x)
        x = x + self.f64conv_2(x)
        x = x + self.f64conv_3(x)
        # stage 2: projection shortcut for the downsampling block,
        # identity shortcuts for the rest
        x = self.myrelu1(self.f128conv_1(x) + self.residual64_128(x))
        x = x + self.f128conv_2(x)
        x = x + self.f128conv_3(x)
        x = x + self.f128conv_4(x)
        # stage 3
        x = self.myrelu2(self.f256conv_1(x) + self.residual128_256(x))
        x = x + self.f256conv_2(x)
        x = x + self.f256conv_3(x)
        x = x + self.f256conv_4(x)
        x = x + self.f256conv_5(x)
        x = x + self.f256conv_6(x)
        # stage 4
        x = self.myrelu3(self.f512conv_1(x) + self.residual256_512(x))
        x = x + self.f512conv_2(x)
        x = x + self.f512conv_3(x)
        # global average pooling over the 7x7 feature maps, then the classifier
        x = F.avg_pool2d(x, 7)
        x = x.view(x.size(0), -1)
        return self.fc(x)
if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    model = myRes34()
    print(model)
    resnet34 = torchvision.models.resnet34(pretrained=False)
    print(resnet34)
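To sanity-check the hand-built network against torchvision's reference, something like the following comparison can be run (this sketch is my addition, not from the original post; the exact parameter counts will differ slightly, e.g. the hand-built stem has no BatchNorm):

import torch
import torchvision

def count_params(m):
    # total number of trainable parameters
    return sum(p.numel() for p in m.parameters() if p.requires_grad)

mine = myRes34()
ref = torchvision.models.resnet34(pretrained=False)
print(count_params(mine), count_params(ref))  # should be in the same ballpark

# both networks map a 224x224 image to 1000 logits
dummy = torch.randn(1, 3, 224, 224)
print(mine(dummy).size(), ref(dummy).size())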