ResNet Source Code in PyTorch: Hand-Writing ResNet34

First, the version from the book, which is quite abstract:

from torch import nn
from torch.nn import functional as F
import os
import torchvision


# basic residual block: two 3x3 convs plus an optional projection shortcut
class ResidualBlock(nn.Module):
    def __init__(self, inchannel, outchannel, stride=1, shortcut=None):
        super(ResidualBlock, self).__init__()
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(outchannel)
        )
        self.right = shortcut

    def forward(self, x):
        out = self.left(x)
        residual = x if self.right is None else self.right(x)
        out += residual
        return F.relu(out)


class ResNetmy(nn.Module):
    def __init__(self, num_classes=1000):
        super(ResNetmy, self).__init__()

        self.pre = nn.Sequential(
            nn.Conv2d(3, 64, 7, 2, 3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, 1))
        # note: this is the book's simplified layout (64->128->256->512->512);
        # torchvision's resnet34 uses 64->64->128->256->512
        self.layer1 = self._make_layer(64, 128, 3)
        self.layer2 = self._make_layer(128, 256, 4, stride=2)
        self.layer3 = self._make_layer(256, 512, 6, stride=2)
        self.layer4 = self._make_layer(512, 512, 3, stride=2)
        self.fc = nn.Linear(512, num_classes)

    def _make_layer(self, inchannel, outchannel, block_num, stride=1):
        # the first block of each stage gets a 1x1 projection shortcut to match
        # channels/stride; the remaining blocks use the identity shortcut
        shortcut = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 1, stride, bias=False),
            nn.BatchNorm2d(outchannel)
        )

        layers = []
        layers.append(ResidualBlock(inchannel, outchannel, stride, shortcut))

        for i in range(1, block_num):
            layers.append(ResidualBlock(outchannel, outchannel))
        return nn.Sequential(*layers)

    def forward(self, x):
        print('input: ', x.size())
        x = self.pre(x)
        print('pre:   ', x.size())
        x = self.layer1(x)
        print('layer1:', x.size())
        x = self.layer2(x)
        print('layer2:', x.size())
        x = self.layer3(x)
        print('layer3:', x.size())
        x = self.layer4(x)
        print('layer4:', x.size())
        x = F.avg_pool2d(x, 7)  # assumes a 224x224 input, which gives a 7x7 map here
        print('pool:  ', x.size())
        x = x.view(x.size(0), -1)
        return self.fc(x)


if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    model = ResNetmy()
    print(model)
    resnet34 = torchvision.models.resnet34(pretrained=False)
    print(resnet34)
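
To sanity-check the sizes the debug prints report, push a dummy batch through the network. A minimal smoke test, assuming a 224x224 input (the size the final 7x7 average pool expects):

import torch

model = ResNetmy()
dummy = torch.randn(1, 3, 224, 224)  # one RGB image, batch size 1
out = model(dummy)
print(out.size())  # expected: torch.Size([1, 1000])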

And a much more verbose one I built by hand:

from torch import nn
from torch.nn import functional as F
import os
import torchvision


class myRes34(nn.Module):
    def __init__(self):
        super(myRes34, self).__init__()
        # stem: 7x7 conv, BN, ReLU, then a 2x2 max-pool
        self.pre = nn.Sequential(
            nn.Conv2d(3, 64, 7, 2, 3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2)
        )
        # identity residual blocks end with BN; the ReLU is applied in
        # forward() after the shortcut is added
        self.f64conv_1 = nn.Sequential(
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64)
        )
        self.f64conv_2 = nn.Sequential(
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64)
        )
        self.f64conv_3 = nn.Sequential(
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, 1, 1, bias=False),
            nn.BatchNorm2d(64)
        )
        self.residual64_128 = nn.Sequential(
            nn.Conv2d(64, 128, 1, 2, bias=False),
            nn.BatchNorm2d(128)
        )
        self.f128conv_1 = nn.Sequential(
            nn.Conv2d(64, 128, 3, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128)
        )
        self.f128conv_2 = nn.Sequential(
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128)
        )
        self.f128conv_3 = nn.Sequential(
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128)
        )
        self.f128conv_4 = nn.Sequential(
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128)
        )
        self.residual128_256 = nn.Sequential(
            nn.Conv2d(128, 256, 1, 2, bias=False),
            nn.BatchNorm2d(256)
        )
        self.f256conv_1 = nn.Sequential(
            nn.Conv2d(128, 256, 3, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256)
        )
        self.f256conv_2 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256)
        )
        self.f256conv_3 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256)
        )
        self.f256conv_4 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256)
        )
        self.f256conv_5 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256)
        )
        self.f256conv_6 = nn.Sequential(
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256)
        )
        self.residual256_512 = nn.Sequential(
            nn.Conv2d(256, 512, 1, 2, bias=False),
            nn.BatchNorm2d(512)
        )
        self.f512conv_1 = nn.Sequential(
            nn.Conv2d(256, 512, 3, 2, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512)
        )
        self.f512conv_2 = nn.Sequential(
            nn.Conv2d(512, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512)
        )
        self.f512conv_3 = nn.Sequential(
            nn.Conv2d(512, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512)
        )
        self.fc = nn.Linear(512, 1000)
        self.myrelu1 = nn.ReLU()
        self.myrelu2 = nn.ReLU()
        self.myrelu3 = nn.ReLU()

    def forward(self, x):
        x = self.pre(x)
        # each residual block: out = ReLU(F(x) + shortcut(x))
        x = F.relu(self.f64conv_1(x) + x)
        x = F.relu(self.f64conv_2(x) + x)
        x = F.relu(self.f64conv_3(x) + x)
        # downsampling junction: 1x1 projection matches channels and stride
        x = self.myrelu1(self.f128conv_1(x) + self.residual64_128(x))
        x = F.relu(self.f128conv_2(x) + x)
        x = F.relu(self.f128conv_3(x) + x)
        x = F.relu(self.f128conv_4(x) + x)
        x = self.myrelu2(self.f256conv_1(x) + self.residual128_256(x))
        x = F.relu(self.f256conv_2(x) + x)
        x = F.relu(self.f256conv_3(x) + x)
        x = F.relu(self.f256conv_4(x) + x)
        x = F.relu(self.f256conv_5(x) + x)
        x = F.relu(self.f256conv_6(x) + x)
        x = self.myrelu3(self.f512conv_1(x) + self.residual256_512(x))
        x = F.relu(self.f512conv_2(x) + x)
        x = F.relu(self.f512conv_3(x) + x)
        x = F.avg_pool2d(x, 7)  # assumes a 224x224 input (7x7 feature map here)
        x = x.view(x.size(0), -1)
        return self.fc(x)


if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    model = myRes34()
    print(model)
    resnet34 = torchvision.models.resnet34(pretrained=False)
    print(resnet34)
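
The two printouts are long and hard to eyeball, so comparing total parameter counts is a quicker consistency check. A minimal sketch to run inside the same __main__, assuming the hand-built stages now mirror torchvision's resnet34 block for block (in which case both counts should land around 21.8 million):

# total trainable + non-trainable parameters of each model
my_params = sum(p.numel() for p in myRes34().parameters())
tv_params = sum(p.numel() for p in torchvision.models.resnet34(pretrained=False).parameters())
print('myRes34 :', my_params)
print('resnet34:', tv_params)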

 
