Object Detection Algorithm Improvement Series: Adding the Asymptotic Feature Pyramid Network (AFPN Module) to the Neck

Asymptotic Feature Pyramid Network (AFPN Module)

In object detection, multi-scale features are important for encoding objects with large scale variation. The common strategy for multi-scale feature extraction is to adopt the classic top-down and bottom-up feature pyramid networks.

However, these methods suffer from loss or degradation of feature information, which weakens fusion across non-adjacent levels. The paper proposes an asymptotic feature pyramid network (AFPN) that supports direct interaction between non-adjacent levels. AFPN first fuses two adjacent low-level features and then progressively brings higher-level features into the fusion process, which avoids the large semantic gap between non-adjacent levels. Since conflicting information from multiple objects can appear when features are fused at each spatial position, an adaptive spatial fusion operation is further used to alleviate these inconsistencies.
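
As a rough sketch of this adaptive spatial fusion step (it follows the ASFF-style weighting that the code below implements; the symbols here are illustrative rather than the paper's exact notation), the participating levels are first resized to a common resolution and then combined at every spatial position (i, j) with learned, softmax-normalized weights:

y_{ij} = \alpha_{ij}\, x^{1}_{ij} + \beta_{ij}\, x^{2}_{ij} + \gamma_{ij}\, x^{3}_{ij}, \qquad \alpha_{ij} + \beta_{ij} + \gamma_{ij} = 1, \quad \alpha_{ij}, \beta_{ij}, \gamma_{ij} \ge 0

Here x^{1}, x^{2}, x^{3} denote the input levels after resizing, and the weights are produced by 1x1 convolutions over the inputs followed by a softmax across the level dimension, so conflicting responses from different levels are suppressed adaptively at each position.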

Paper: AFPN: Asymptotic Feature Pyramid Network for Object Detection

AFPN architecture diagram

Code Implementation

from collections import OrderedDict

import torch
import torch.nn as nn
import torch.nn.functional as F

from mmdet.registry import MODELS


def BasicConv(filter_in, filter_out, kernel_size, stride=1, pad=None):
    if not pad:
        pad = (kernel_size - 1) // 2 if kernel_size else 0
    else:
        pad = pad
    return nn.Sequential(OrderedDict([
        ("conv", nn.Conv2d(filter_in, filter_out, kernel_size=kernel_size, stride=stride, padding=pad, bias=False)),
        ("bn", nn.BatchNorm2d(filter_out)),
        ("relu", nn.ReLU(inplace=True)),
    ]))


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, filter_in, filter_out):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(filter_in, filter_out, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(filter_out, momentum=0.1)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(filter_out, filter_out, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(filter_out, momentum=0.1)

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        out += residual
        out = self.relu(out)

        return out


class Upsample(nn.Module):
    def __init__(self, in_channels, out_channels, scale_factor=2):
        super(Upsample, self).__init__()
        self.upsample = nn.Sequential(
            BasicConv(in_channels, out_channels, 1),
            nn.Upsample(scale_factor=scale_factor, mode='bilinear')
        )
        # carafe
        # from mmcv.ops import CARAFEPack
        # self.upsample = nn.Sequential(
        #     BasicConv(in_channels, out_channels, 1),
        #     CARAFEPack(out_channels, scale_factor=scale_factor)
        # )

    def forward(self, x):
        x = self.upsample(x)
        return x


class Downsample_x2(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(Downsample_x2, self).__init__()
        self.downsample = nn.Sequential(
            BasicConv(in_channels, out_channels, 2, 2, 0)
        )

    def forward(self, x):
        x = self.downsample(x)
        return x


class Downsample_x4(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(Downsample_x4, self).__init__()
        self.downsample = nn.Sequential(
            BasicConv(in_channels, out_channels, 4, 4, 0)
        )

    def forward(self, x):
        x = self.downsample(x)
        return x


class Downsample_x8(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(Downsample_x8, self).__init__()
        self.downsample = nn.Sequential(
            BasicConv(in_channels, out_channels, 8, 8, 0)
        )

    def forward(self, x):
        x = self.downsample(x)
        return x


class ASFF_2(nn.Module):
    def __init__(self, inter_dim=512):
        super(ASFF_2, self).__init__()
        self.inter_dim = inter_dim
        compress_c = 8

        self.weight_level_1 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_levels = nn.Conv2d(compress_c * 2, 2, kernel_size=1, stride=1, padding=0)
        self.conv = BasicConv(self.inter_dim, self.inter_dim, 3, 1)

    def forward(self, input1, input2):
        level_1_weight_v = self.weight_level_1(input1)
        level_2_weight_v = self.weight_level_2(input2)

        levels_weight_v = torch.cat((level_1_weight_v, level_2_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        levels_weight = F.softmax(levels_weight, dim=1)

        fused_out_reduced = input1 * levels_weight[:, 0:1, :, :] + \
                            input2 * levels_weight[:, 1:2, :, :]

        out = self.conv(fused_out_reduced)
        return out


class ASFF_3(nn.Module):
    def __init__(self, inter_dim=512):
        super(ASFF_3, self).__init__()
        self.inter_dim = inter_dim
        compress_c = 8

        self.weight_level_1 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_3 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_levels = nn.Conv2d(compress_c * 3, 3, kernel_size=1, stride=1, padding=0)
        self.conv = BasicConv(self.inter_dim, self.inter_dim, 3, 1)

    def forward(self, input1, input2, input3):
        level_1_weight_v = self.weight_level_1(input1)
        level_2_weight_v = self.weight_level_2(input2)
        level_3_weight_v = self.weight_level_3(input3)

        levels_weight_v = torch.cat((level_1_weight_v, level_2_weight_v, level_3_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        levels_weight = F.softmax(levels_weight, dim=1)

        fused_out_reduced = input1 * levels_weight[:, 0:1, :, :] + \
                            input2 * levels_weight[:, 1:2, :, :] + \
                            input3 * levels_weight[:, 2:, :, :]

        out = self.conv(fused_out_reduced)
        return out


class ASFF_4(nn.Module):
    def __init__(self, inter_dim=512):
        super(ASFF_4, self).__init__()
        self.inter_dim = inter_dim
        compress_c = 8

        self.weight_level_0 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_1 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_3 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_levels = nn.Conv2d(compress_c * 4, 4, kernel_size=1, stride=1, padding=0)
        self.conv = BasicConv(self.inter_dim, self.inter_dim, 3, 1)

    def forward(self, input0, input1, input2, input3):
        level_0_weight_v = self.weight_level_0(input0)
        level_1_weight_v = self.weight_level_1(input1)
        level_2_weight_v = self.weight_level_2(input2)
        level_3_weight_v = self.weight_level_3(input3)

        levels_weight_v = torch.cat((level_0_weight_v, level_1_weight_v, level_2_weight_v, level_3_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        levels_weight = F.softmax(levels_weight, dim=1)

        fused_out_reduced = input0 * levels_weight[:, 0:1, :, :] + \
                            input1 * levels_weight[:, 1:2, :, :] + \
                            input2 * levels_weight[:, 2:3, :, :] + \
                            input3 * levels_weight[:, 3:, :, :]

        out = self.conv(fused_out_reduced)
        return out


class BlockBody(nn.Module):
    def __init__(self, channels=[64, 128, 256, 512]):
        super(BlockBody, self).__init__()

        self.blocks_scalezero1 = nn.Sequential(
            BasicConv(channels[0], channels[0], 1),
        )
        self.blocks_scaleone1 = nn.Sequential(
            BasicConv(channels[1], channels[1], 1),
        )
        self.blocks_scaletwo1 = nn.Sequential(
            BasicConv(channels[2], channels[2], 1),
        )
        self.blocks_scalethree1 = nn.Sequential(
            BasicConv(channels[3], channels[3], 1),
        )

        self.downsample_scalezero1_2 = Downsample_x2(channels[0], channels[1])
        self.upsample_scaleone1_2 = Upsample(channels[1], channels[0], scale_factor=2)

        self.asff_scalezero1 = ASFF_2(inter_dim=channels[0])
        self.asff_scaleone1 = ASFF_2(inter_dim=channels[1])

        self.blocks_scalezero2 = nn.Sequential(
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
        )
        self.blocks_scaleone2 = nn.Sequential(
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
        )

        self.downsample_scalezero2_2 = Downsample_x2(channels[0], channels[1])
        self.downsample_scalezero2_4 = Downsample_x4(channels[0], channels[2])
        self.downsample_scaleone2_2 = Downsample_x2(channels[1], channels[2])
        self.upsample_scaleone2_2 = Upsample(channels[1], channels[0], scale_factor=2)
        self.upsample_scaletwo2_2 = Upsample(channels[2], channels[1], scale_factor=2)
        self.upsample_scaletwo2_4 = Upsample(channels[2], channels[0], scale_factor=4)

        self.asff_scalezero2 = ASFF_3(inter_dim=channels[0])
        self.asff_scaleone2 = ASFF_3(inter_dim=channels[1])
        self.asff_scaletwo2 = ASFF_3(inter_dim=channels[2])

        self.blocks_scalezero3 = nn.Sequential(
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
        )
        self.blocks_scaleone3 = nn.Sequential(
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
        )
        self.blocks_scaletwo3 = nn.Sequential(
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
        )

        self.downsample_scalezero3_2 = Downsample_x2(channels[0], channels[1])
        self.downsample_scalezero3_4 = Downsample_x4(channels[0], channels[2])
        self.downsample_scalezero3_8 = Downsample_x8(channels[0], channels[3])
        self.upsample_scaleone3_2 = Upsample(channels[1], channels[0], scale_factor=2)
        self.downsample_scaleone3_2 = Downsample_x2(channels[1], channels[2])
        self.downsample_scaleone3_4 = Downsample_x4(channels[1], channels[3])
        self.upsample_scaletwo3_4 = Upsample(channels[2], channels[0], scale_factor=4)
        self.upsample_scaletwo3_2 = Upsample(channels[2], channels[1], scale_factor=2)
        self.downsample_scaletwo3_2 = Downsample_x2(channels[2], channels[3])
        self.upsample_scalethree3_8 = Upsample(channels[3], channels[0], scale_factor=8)
        self.upsample_scalethree3_4 = Upsample(channels[3], channels[1], scale_factor=4)
        self.upsample_scalethree3_2 = Upsample(channels[3], channels[2], scale_factor=2)

        self.asff_scalezero3 = ASFF_4(inter_dim=channels[0])
        self.asff_scaleone3 = ASFF_4(inter_dim=channels[1])
        self.asff_scaletwo3 = ASFF_4(inter_dim=channels[2])
        self.asff_scalethree3 = ASFF_4(inter_dim=channels[3])

        self.blocks_scalezero4 = nn.Sequential(
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
        )
        self.blocks_scaleone4 = nn.Sequential(
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
        )
        self.blocks_scaletwo4 = nn.Sequential(
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
        )
        self.blocks_scalethree4 = nn.Sequential(
            BasicBlock(channels[3], channels[3]),
            BasicBlock(channels[3], channels[3]),
            BasicBlock(channels[3], channels[3]),
            BasicBlock(channels[3], channels[3]),
        )

    def forward(self, x):
        x0, x1, x2, x3 = x

        x0 = self.blocks_scalezero1(x0)
        x1 = self.blocks_scaleone1(x1)
        x2 = self.blocks_scaletwo1(x2)
        x3 = self.blocks_scalethree1(x3)

        scalezero = self.asff_scalezero1(x0, self.upsample_scaleone1_2(x1))
        scaleone = self.asff_scaleone1(self.downsample_scalezero1_2(x0), x1)

        x0 = self.blocks_scalezero2(scalezero)
        x1 = self.blocks_scaleone2(scaleone)

        scalezero = self.asff_scalezero2(x0, self.upsample_scaleone2_2(x1), self.upsample_scaletwo2_4(x2))
        scaleone = self.asff_scaleone2(self.downsample_scalezero2_2(x0), x1, self.upsample_scaletwo2_2(x2))
        scaletwo = self.asff_scaletwo2(self.downsample_scalezero2_4(x0), self.downsample_scaleone2_2(x1), x2)

        x0 = self.blocks_scalezero3(scalezero)
        x1 = self.blocks_scaleone3(scaleone)
        x2 = self.blocks_scaletwo3(scaletwo)

        scalezero = self.asff_scalezero3(x0, self.upsample_scaleone3_2(x1), self.upsample_scaletwo3_4(x2), self.upsample_scalethree3_8(x3))
        scaleone = self.asff_scaleone3(self.downsample_scalezero3_2(x0), x1, self.upsample_scaletwo3_2(x2), self.upsample_scalethree3_4(x3))
        scaletwo = self.asff_scaletwo3(self.downsample_scalezero3_4(x0), self.downsample_scaleone3_2(x1), x2, self.upsample_scalethree3_2(x3))
        scalethree = self.asff_scalethree3(self.downsample_scalezero3_8(x0), self.downsample_scaleone3_4(x1), self.downsample_scaletwo3_2(x2), x3)

        scalezero = self.blocks_scalezero4(scalezero)
        scaleone = self.blocks_scaleone4(scaleone)
        scaletwo = self.blocks_scaletwo4(scaletwo)
        scalethree = self.blocks_scalethree4(scalethree)

        return scalezero, scaleone, scaletwo, scalethree


@MODELS.register_module()
class AFPN(nn.Module):
    def __init__(self,
                 in_channels=[256, 512, 1024, 2048],
                 out_channels=256):
        super(AFPN, self).__init__()

        self.fp16_enabled = False

        self.conv0 = BasicConv(in_channels[0], in_channels[0] // 8, 1)
        self.conv1 = BasicConv(in_channels[1], in_channels[1] // 8, 1)
        self.conv2 = BasicConv(in_channels[2], in_channels[2] // 8, 1)
        self.conv3 = BasicConv(in_channels[3], in_channels[3] // 8, 1)

        self.body = nn.Sequential(
            BlockBody([in_channels[0] // 8, in_channels[1] // 8, in_channels[2] // 8, in_channels[3] // 8])
        )

        self.conv00 = BasicConv(in_channels[0] // 8, out_channels, 1)
        self.conv11 = BasicConv(in_channels[1] // 8, out_channels, 1)
        self.conv22 = BasicConv(in_channels[2] // 8, out_channels, 1)
        self.conv33 = BasicConv(in_channels[3] // 8, out_channels, 1)
        self.conv44 = nn.MaxPool2d(kernel_size=1, stride=2)

        # init weight
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight, gain=0.02)
            elif isinstance(m, nn.BatchNorm2d):
                torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
                torch.nn.init.constant_(m.bias.data, 0.0)

    def forward(self, x):
        x0, x1, x2, x3 = x

        x0 = self.conv0(x0)
        x1 = self.conv1(x1)
        x2 = self.conv2(x2)
        x3 = self.conv3(x3)

        out0, out1, out2, out3 = self.body([x0, x1, x2, x3])

        out0 = self.conv00(out0)
        out1 = self.conv11(out1)
        out2 = self.conv22(out2)
        out3 = self.conv33(out3)
        out4 = self.conv44(out3)

        return out0, out1, out2, out3, out4


if __name__ == "__main__":
    print()
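
The snippet below is a minimal smoke test of the module above, not part of the original post. It assumes mmdet is installed (so that `from mmdet.registry import MODELS` resolves), that the code above is saved as a file named afpn.py (the filename is only illustrative), and that the dummy shapes correspond to ResNet-50 C2-C5 features for a 640x640 input:

import torch

from afpn import AFPN  # hypothetical module name for the code above

neck = AFPN(in_channels=[256, 512, 1024, 2048], out_channels=256)
neck.eval()

# Dummy backbone features at strides 4/8/16/32 for an assumed 640x640 input.
feats = [
    torch.randn(1, 256, 160, 160),
    torch.randn(1, 512, 80, 80),
    torch.randn(1, 1024, 40, 40),
    torch.randn(1, 2048, 20, 20),
]

with torch.no_grad():
    outs = neck(feats)

for i, o in enumerate(outs):
    # Five outputs, each with out_channels channels; the last one is the
    # extra level produced by the stride-2 max pooling of out3.
    print(f"out{i}: {tuple(o.shape)}")

Under these assumptions the five outputs all have 256 channels, at strides 4, 8, 16, 32, and 64 respectively.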

Adding the AFPN Module to YOLOv5

AFPN can be used with object detection algorithms such as YOLOv8, YOLOv7, YOLOv5, YOLOv4, and Faster R-CNN. A code example of adding the AFPN module to YOLOv5 is given below:

from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F

from mmyolo.registry import MODELS


def BasicConv(filter_in, filter_out, kernel_size, stride=1):
    pad = (kernel_size - 1) // 2 if kernel_size else 0
    return nn.Sequential(OrderedDict([
        ("conv", nn.Conv2d(filter_in, filter_out, kernel_size=kernel_size, stride=stride, padding=pad, bias=False)),
        ("bn", nn.BatchNorm2d(filter_out)),
        ("silu", nn.SiLU(inplace=True)),
    ]))


def Conv(filter_in, filter_out, kernel_size, stride=1, pad=0):
    return nn.Sequential(OrderedDict([
        ("conv", nn.Conv2d(filter_in, filter_out, kernel_size=kernel_size, stride=stride, padding=pad, bias=False)),
        ("bn", nn.BatchNorm2d(filter_out)),
        ("silu", nn.SiLU(inplace=True)),
    ]))


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, filter_in, filter_out):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(filter_in, filter_out, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(filter_out, momentum=0.1)
        self.silu = nn.SiLU(inplace=True)
        self.conv2 = nn.Conv2d(filter_out, filter_out, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(filter_out, momentum=0.1)

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.silu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        out += residual
        out = self.silu(out)

        return out


class Upsample(nn.Module):
    def __init__(self, in_channels, out_channels, scale_factor=2):
        super(Upsample, self).__init__()
        self.upsample = nn.Sequential(
            BasicConv(in_channels, out_channels, 1),
            nn.Upsample(scale_factor=scale_factor, mode='bilinear')
        )

    def forward(self, x):
        x = self.upsample(x)
        return x


class Downsample_x2(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(Downsample_x2, self).__init__()
        self.downsample = nn.Sequential(
            Conv(in_channels, out_channels, 2, 2)
        )

    def forward(self, x):
        x = self.downsample(x)
        return x


class Downsample_x4(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(Downsample_x4, self).__init__()
        self.downsample = nn.Sequential(
            Conv(in_channels, out_channels, 4, 4)
        )

    def forward(self, x):
        x = self.downsample(x)
        return x


class ASFF_2(nn.Module):
    def __init__(self, inter_dim=512):
        super(ASFF_2, self).__init__()
        self.inter_dim = inter_dim
        compress_c = 8

        self.weight_level_1 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_levels = nn.Conv2d(compress_c * 2, 2, kernel_size=1, stride=1, padding=0)
        self.conv = BasicConv(self.inter_dim, self.inter_dim, 3, 1)

    def forward(self, input1, input2):
        level_1_weight_v = self.weight_level_1(input1)
        level_2_weight_v = self.weight_level_2(input2)

        levels_weight_v = torch.cat((level_1_weight_v, level_2_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        levels_weight = F.softmax(levels_weight, dim=1)

        fused_out_reduced = input1 * levels_weight[:, 0:1, :, :] + \
                            input2 * levels_weight[:, 1:2, :, :]

        out = self.conv(fused_out_reduced)
        return out


class ASFF_3(nn.Module):
    def __init__(self, inter_dim=512):
        super(ASFF_3, self).__init__()
        self.inter_dim = inter_dim
        compress_c = 8

        self.weight_level_1 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_3 = BasicConv(self.inter_dim, compress_c, 1, 1)
        self.weight_levels = nn.Conv2d(compress_c * 3, 3, kernel_size=1, stride=1, padding=0)
        self.conv = BasicConv(self.inter_dim, self.inter_dim, 3, 1)

    def forward(self, input1, input2, input3):
        level_1_weight_v = self.weight_level_1(input1)
        level_2_weight_v = self.weight_level_2(input2)
        level_3_weight_v = self.weight_level_3(input3)

        levels_weight_v = torch.cat((level_1_weight_v, level_2_weight_v, level_3_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        levels_weight = F.softmax(levels_weight, dim=1)

        fused_out_reduced = input1 * levels_weight[:, 0:1, :, :] + \
                            input2 * levels_weight[:, 1:2, :, :] + \
                            input3 * levels_weight[:, 2:, :, :]

        out = self.conv(fused_out_reduced)
        return out


class ScaleBlockBody(nn.Module):
    def __init__(self, channels=[128, 256, 512]):
        super(ScaleBlockBody, self).__init__()

        self.blocks_top1 = nn.Sequential(
            BasicConv(channels[0], channels[0], 1),
        )
        self.blocks_mid1 = nn.Sequential(
            BasicConv(channels[1], channels[1], 1),
        )
        self.blocks_bot1 = nn.Sequential(
            BasicConv(channels[2], channels[2], 1),
        )

        self.downsample_top1_2 = Downsample_x2(channels[0], channels[1])
        self.upsample_mid1_2 = Upsample(channels[1], channels[0], scale_factor=2)

        self.asff_top1 = ASFF_2(inter_dim=channels[0])
        self.asff_mid1 = ASFF_2(inter_dim=channels[1])

        self.blocks_top2 = nn.Sequential(
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0])
        )
        self.blocks_mid2 = nn.Sequential(
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1])
        )

        self.downsample_top2_2 = Downsample_x2(channels[0], channels[1])
        self.downsample_top2_4 = Downsample_x4(channels[0], channels[2])
        self.downsample_mid2_2 = Downsample_x2(channels[1], channels[2])
        self.upsample_mid2_2 = Upsample(channels[1], channels[0], scale_factor=2)
        self.upsample_bot2_2 = Upsample(channels[2], channels[1], scale_factor=2)
        self.upsample_bot2_4 = Upsample(channels[2], channels[0], scale_factor=4)

        self.asff_top2 = ASFF_3(inter_dim=channels[0])
        self.asff_mid2 = ASFF_3(inter_dim=channels[1])
        self.asff_bot2 = ASFF_3(inter_dim=channels[2])

        self.blocks_top3 = nn.Sequential(
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0]),
            BasicBlock(channels[0], channels[0])
        )
        self.blocks_mid3 = nn.Sequential(
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1]),
            BasicBlock(channels[1], channels[1])
        )
        self.blocks_bot3 = nn.Sequential(
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2]),
            BasicBlock(channels[2], channels[2])
        )

    def forward(self, x):
        x1, x2, x3 = x

        x1 = self.blocks_top1(x1)
        x2 = self.blocks_mid1(x2)
        x3 = self.blocks_bot1(x3)

        top = self.asff_top1(x1, self.upsample_mid1_2(x2))
        mid = self.asff_mid1(self.downsample_top1_2(x1), x2)

        x1 = self.blocks_top2(top)
        x2 = self.blocks_mid2(mid)

        top = self.asff_top2(x1, self.upsample_mid2_2(x2), self.upsample_bot2_4(x3))
        mid = self.asff_mid2(self.downsample_top2_2(x1), x2, self.upsample_bot2_2(x3))
        bot = self.asff_bot2(self.downsample_top2_4(x1), self.downsample_mid2_2(x2), x3)

        top = self.blocks_top3(top)
        mid = self.blocks_mid3(mid)
        bot = self.blocks_bot3(bot)

        return top, mid, bot


@MODELS.register_module()
class YOLOv5AFPN(nn.Module):
    def __init__(self, in_channels=[256, 512, 1024], out_channels=[256, 512, 1024]):
        super(YOLOv5AFPN, self).__init__()

        self.conv1 = BasicConv(in_channels[0], in_channels[0] // 4, 1)
        self.conv2 = BasicConv(in_channels[1], in_channels[1] // 4, 1)
        self.conv3 = BasicConv(in_channels[2], in_channels[2] // 4, 1)

        self.body = nn.Sequential(
            ScaleBlockBody([in_channels[0] // 4, in_channels[1] // 4, in_channels[2] // 4])
        )

        self.conv11 = BasicConv(in_channels[0] // 4, out_channels[0], 1)
        self.conv22 = BasicConv(in_channels[1] // 4, out_channels[1], 1)
        self.conv33 = BasicConv(in_channels[2] // 4, out_channels[2], 1)

        # ----------------------------------------------------------------#
        #   init weight
        # ----------------------------------------------------------------#
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight, gain=0.02)
            elif isinstance(m, nn.BatchNorm2d):
                torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
                torch.nn.init.constant_(m.bias.data, 0.0)

    def forward(self, x):
        x1, x2, x3 = x

        x1 = self.conv1(x1)
        x2 = self.conv2(x2)
        x3 = self.conv3(x3)

        out1, out2, out3 = self.body([x1, x2, x3])

        out1 = self.conv11(out1)
        out2 = self.conv22(out2)
        out3 = self.conv33(out3)

        return tuple([out1, out2, out3])
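
As above, a small sanity check (not from the original post) can confirm that this YOLOv5 variant preserves the usual P3/P4/P5 shapes. It assumes mmyolo is installed (for `from mmyolo.registry import MODELS`), that the code above lives in a file named yolov5_afpn.py (an illustrative name), and a 640x640 input producing stride-8/16/32 features:

import torch

from yolov5_afpn import YOLOv5AFPN  # hypothetical module name for the code above

neck = YOLOv5AFPN(in_channels=[256, 512, 1024], out_channels=[256, 512, 1024])
neck.eval()

# Dummy P3/P4/P5 features at strides 8/16/32 for an assumed 640x640 input.
feats = [
    torch.randn(1, 256, 80, 80),
    torch.randn(1, 512, 40, 40),
    torch.randn(1, 1024, 20, 20),
]

with torch.no_grad():
    out1, out2, out3 = neck(feats)

print(out1.shape, out2.shape, out3.shape)
# Expected under these assumptions:
# (1, 256, 80, 80), (1, 512, 40, 40), (1, 1024, 20, 20)

In an mmyolo-style config, the registered neck would then be selected by its type name, e.g. neck=dict(type='YOLOv5AFPN', in_channels=[256, 512, 1024], out_channels=[256, 512, 1024]); this is only a sketch, since the remaining keys depend on the concrete YOLOv5 config being modified.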
