#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2023/2/6 2:59 PM
# @File : model.py
# ----------------------------------------------
# ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆
# >>> Author : Kevin Chang
# >>> QQ : 565479588
# >>> Mail : lovecode@gmail.com
# >>> Github : https://github.com/lovecode100
# >>> Blog : https://www.cnblogs.com/lovecode
# ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆ ☆
import torch
import torch.nn as nn


# AlexNet has 8 layers with trainable parameters (not counting the pooling and LRN layers):
# the first 5 are convolutional layers and the last 3 are fully connected layers.
class AlexNet(nn.Module):
    def __init__(self, num_classes=1000, init_weights=False):
        super(AlexNet, self).__init__()
        # Wrap the layers in nn.Sequential() to package the network as a single module and keep the code concise
        self.features = nn.Sequential(  # convolutional layers extract image features
            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2),  # input[3, 224, 224] output[96, 55, 55]
            nn.ReLU(inplace=True),  # overwrite the input tensor in place to save memory
            nn.MaxPool2d(kernel_size=3, stride=2),  # output[96, 27, 27]

            nn.Conv2d(96, 256, kernel_size=5, padding=2),  # output[256, 27, 27]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),  # output[256, 13, 13]

            nn.Conv2d(256, 384, kernel_size=3, padding=1),  # output[384, 13, 13]
            nn.ReLU(inplace=True),

            nn.Conv2d(384, 384, kernel_size=3, padding=1),  # output[384, 13, 13]
            nn.ReLU(inplace=True),

            nn.Conv2d(384, 256, kernel_size=3, padding=1),  # output[256, 13, 13]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),  # output[256, 6, 6]
        )
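        # Each spatial size annotated above follows the usual formula
        # floor((W + 2*padding - kernel_size) / stride) + 1,
        # giving the sequence 224 -> 55 -> 27 -> 13 -> 6.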
        self.classifier = nn.Sequential(  # fully connected layers classify the image
            nn.Dropout(p=0.5),  # Dropout randomly zeroes neurons; the default probability is 0.5
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, start_dim=1)  # flatten before passing to the fully connected layers
        x = self.classifier(x)
        return x

    # Weight initialization; in practice PyTorch already initializes weights
    # automatically when the layers are constructed.
    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):  # for convolutional layers
                nn.init.kaiming_normal_(m.weight, mode='fan_out',  # Kaiming (He) normal initialization
                                        nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)  # initialize the bias to 0
            elif isinstance(m, nn.Linear):  # for fully connected layers
                nn.init.normal_(m.weight, 0, 0.01)  # normal distribution with mean 0, std 0.01
                nn.init.constant_(m.bias, 0)  # initialize the bias to 0
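

# Minimal smoke test: run a dummy 224x224 batch through the network and
# verify that the logits have the expected shape.
if __name__ == "__main__":
    model = AlexNet(num_classes=1000, init_weights=True)
    dummy = torch.randn(1, 3, 224, 224)  # batch of one 3-channel 224x224 image
    logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([1, 1000])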