J1 Learning Check-in

  • 🍨 This post is a learning-record blog for the 🔗 365-day deep learning training camp
  • 🍖 Original author: K同学啊
# Data preprocessing and loading
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt
import numpy as np
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_dir = r"C:\Users\11054\Desktop\kLearning\J1_learning\bird_photos"transform = transforms.Compose([transforms.Resize((224, 224)),transforms.ToTensor(),
])train_dataset = datasets.ImageFolder(data_dir, transform=transform)
train_size = int(0.8 * len(train_dataset))
val_size = len(train_dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [train_size, val_size])train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=8, shuffle=False)class_names = train_dataset.dataset.classes
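Note that random_split returns Subset objects, which is why the class names are read from the underlying dataset via train_dataset.dataset.classes. A quick sanity check (a minimal sketch, nothing here beyond what the loaders above define):

# Confirm the split sizes and the classes discovered by ImageFolder
print(f'train: {len(train_dataset)}, val: {len(val_dataset)}')
print(f'classes: {class_names}')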


# Define the ResNet-50 model
import torch
import torch.nn as nn
import torch.nn.functional as F

class IdentityBlock(nn.Module):
    """Bottleneck block whose shortcut is the unmodified input (shapes already match)."""
    def __init__(self, in_channels, filters, kernel_size):
        super(IdentityBlock, self).__init__()
        filters1, filters2, filters3 = filters
        self.conv1 = nn.Conv2d(in_channels, filters1, kernel_size=1)
        self.bn1 = nn.BatchNorm2d(filters1)
        self.conv2 = nn.Conv2d(filters1, filters2, kernel_size=kernel_size, padding=1)
        self.bn2 = nn.BatchNorm2d(filters2)
        self.conv3 = nn.Conv2d(filters2, filters3, kernel_size=1)
        self.bn3 = nn.BatchNorm2d(filters3)

    def forward(self, x):
        shortcut = x
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x += shortcut  # residual addition
        x = F.relu(x)
        return x

class ConvBlock(nn.Module):
    """Bottleneck block with a strided 1x1 projection so the shortcut matches the new shape."""
    def __init__(self, in_channels, filters, kernel_size, strides):
        super(ConvBlock, self).__init__()
        filters1, filters2, filters3 = filters
        # the stride sits on the first 1x1 conv, matching the Keras-style original
        self.conv1 = nn.Conv2d(in_channels, filters1, kernel_size=1, stride=strides)
        self.bn1 = nn.BatchNorm2d(filters1)
        self.conv2 = nn.Conv2d(filters1, filters2, kernel_size=kernel_size, padding=1)
        self.bn2 = nn.BatchNorm2d(filters2)
        self.conv3 = nn.Conv2d(filters2, filters3, kernel_size=1)
        self.bn3 = nn.BatchNorm2d(filters3)
        self.shortcut_conv = nn.Conv2d(in_channels, filters3, kernel_size=1, stride=strides)
        self.shortcut_bn = nn.BatchNorm2d(filters3)

    def forward(self, x):
        shortcut = self.shortcut_conv(x)
        shortcut = self.shortcut_bn(shortcut)
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x += shortcut  # residual addition through the projection shortcut
        x = F.relu(x)
        return x

class ResNet50(nn.Module):
    def __init__(self, num_classes=1000):
        super(ResNet50, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # stage layout follows ResNet-50: 3, 4, 6, 3 bottleneck blocks
        self.layer2 = self._make_layer(64, [64, 64, 256], 3, stride=1)
        self.layer3 = self._make_layer(256, [128, 128, 512], 4, stride=2)
        self.layer4 = self._make_layer(512, [256, 256, 1024], 6, stride=2)
        self.layer5 = self._make_layer(1024, [512, 512, 2048], 3, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(2048, num_classes)

    def _make_layer(self, in_channels, filters, blocks, stride):
        # one ConvBlock to change shape, then identity blocks that preserve it
        layers = []
        layers.append(ConvBlock(in_channels, filters, 3, stride))
        for _ in range(1, blocks):
            layers.append(IdentityBlock(filters[2], filters, 3))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

# Example of initialization
model = ResNet50(num_classes=4)
# model = models.resnet50(pretrained=True)  # alternative: torchvision's pretrained ResNet-50
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, len(class_names))  # resize the head to the dataset's classes
model = model.to(device)
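Before training, a dummy forward pass is a cheap way to confirm the hand-built architecture produces logits of the expected shape (a minimal sketch):

# Sanity-check the network with a random batch of one image
with torch.no_grad():
    dummy = torch.randn(1, 3, 224, 224).to(device)
    print(model(dummy).shape)  # expect torch.Size([1, 4]) for the 4 bird classes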
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Train the model
import copy

epochs = 100
train_losses, val_losses = [], []
train_acc, val_acc = [], []
best_val_loss = float('inf')
best_model_wts = None  # holds the best model weights seen so far

for epoch in range(epochs):
    # Training
    model.train()
    running_loss, running_corrects = 0.0, 0
    for inputs, labels in train_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * inputs.size(0)
        _, preds = torch.max(outputs, 1)
        running_corrects += torch.sum(preds == labels.data)
    epoch_loss = running_loss / train_size
    epoch_acc = running_corrects.double() / train_size
    train_losses.append(epoch_loss)
    train_acc.append(epoch_acc.item())

    # Validation
    model.eval()
    val_running_loss, val_running_corrects = 0.0, 0
    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            val_running_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            val_running_corrects += torch.sum(preds == labels.data)
    val_epoch_loss = val_running_loss / val_size
    val_epoch_acc = val_running_corrects.double() / val_size
    val_losses.append(val_epoch_loss)
    val_acc.append(val_epoch_acc.item())

    if val_epoch_loss < best_val_loss:
        best_val_loss = val_epoch_loss
        # deepcopy is required: state_dict() returns references that later steps would overwrite
        best_model_wts = copy.deepcopy(model.state_dict())

    print(f'Epoch {epoch}/{epochs-1}, Train Loss: {epoch_loss:.4f}, Train Acc: {epoch_acc:.4f}, '
          f'Val Loss: {val_epoch_loss:.4f}, Val Acc: {val_epoch_acc:.4f}')

# After training, restore the best model weights
model.load_state_dict(best_model_wts)
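To keep the best checkpoint beyond the current session, the weights can also be written to disk (a minimal sketch; the filename best_model.pth is an arbitrary choice):

# Persist the best weights and reload them in a later session
torch.save(best_model_wts, 'best_model.pth')
# model.load_state_dict(torch.load('best_model.pth', map_location=device))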
Epoch 0/99, Train Loss: 0.5125, Train Acc: 0.8119, Val Loss: 3.1043, Val Acc: 0.5221
Epoch 1/99, Train Loss: 0.6260, Train Acc: 0.7788, Val Loss: 0.6525, Val Acc: 0.7522
Epoch 2/99, Train Loss: 0.4564, Train Acc: 0.8429, Val Loss: 1.2441, Val Acc: 0.6814
Epoch 3/99, Train Loss: 0.4463, Train Acc: 0.8230, Val Loss: 0.8466, Val Acc: 0.7699
Epoch 4/99, Train Loss: 0.5827, Train Acc: 0.7898, Val Loss: 0.8394, Val Acc: 0.7788
Epoch 5/99, Train Loss: 0.4685, Train Acc: 0.8385, Val Loss: 0.6826, Val Acc: 0.8142
Epoch 6/99, Train Loss: 0.3892, Train Acc: 0.8606, Val Loss: 0.6440, Val Acc: 0.7699
Epoch 7/99, Train Loss: 0.4116, Train Acc: 0.8606, Val Loss: 0.7322, Val Acc: 0.7876
Epoch 8/99, Train Loss: 0.3453, Train Acc: 0.8872, Val Loss: 0.9246, Val Acc: 0.7522
Epoch 9/99, Train Loss: 0.2964, Train Acc: 0.8805, Val Loss: 0.5056, Val Acc: 0.8584
Epoch 10/99, Train Loss: 0.3832, Train Acc: 0.8739, Val Loss: 0.8432, Val Acc: 0.7345
Epoch 11/99, Train Loss: 0.4082, Train Acc: 0.8628, Val Loss: 1.2504, Val Acc: 0.6637
Epoch 12/99, Train Loss: 0.3812, Train Acc: 0.8562, Val Loss: 0.5781, Val Acc: 0.7699
Epoch 13/99, Train Loss: 0.2939, Train Acc: 0.8982, Val Loss: 0.6404, Val Acc: 0.8407
Epoch 14/99, Train Loss: 0.2671, Train Acc: 0.8960, Val Loss: 0.6122, Val Acc: 0.8230
Epoch 15/99, Train Loss: 0.3338, Train Acc: 0.8850, Val Loss: 0.9396, Val Acc: 0.7699
Epoch 16/99, Train Loss: 0.3182, Train Acc: 0.8872, Val Loss: 0.7527, Val Acc: 0.8584
Epoch 17/99, Train Loss: 0.2798, Train Acc: 0.9137, Val Loss: 0.7588, Val Acc: 0.7522
Epoch 18/99, Train Loss: 0.2432, Train Acc: 0.9159, Val Loss: 0.8711, Val Acc: 0.7699
Epoch 19/99, Train Loss: 0.2381, Train Acc: 0.9204, Val Loss: 0.6623, Val Acc: 0.7965
Epoch 20/99, Train Loss: 0.2503, Train Acc: 0.9159, Val Loss: 1.0319, Val Acc: 0.7168
Epoch 21/99, Train Loss: 0.3165, Train Acc: 0.9049, Val Loss: 0.5331, Val Acc: 0.8496
Epoch 22/99, Train Loss: 0.2036, Train Acc: 0.9292, Val Loss: 0.8623, Val Acc: 0.7611
Epoch 23/99, Train Loss: 0.2089, Train Acc: 0.9292, Val Loss: 0.8315, Val Acc: 0.8142
Epoch 24/99, Train Loss: 0.2094, Train Acc: 0.9336, Val Loss: 0.5755, Val Acc: 0.8053
Epoch 25/99, Train Loss: 0.0996, Train Acc: 0.9690, Val Loss: 0.6812, Val Acc: 0.7699
Epoch 26/99, Train Loss: 0.1375, Train Acc: 0.9558, Val Loss: 0.4544, Val Acc: 0.8850
Epoch 27/99, Train Loss: 0.1011, Train Acc: 0.9646, Val Loss: 0.5622, Val Acc: 0.8407
Epoch 28/99, Train Loss: 0.1597, Train Acc: 0.9447, Val Loss: 0.5689, Val Acc: 0.8407
Epoch 29/99, Train Loss: 0.1708, Train Acc: 0.9491, Val Loss: 0.6313, Val Acc: 0.8319
Epoch 30/99, Train Loss: 0.0951, Train Acc: 0.9668, Val Loss: 0.5573, Val Acc: 0.8496
Epoch 31/99, Train Loss: 0.1465, Train Acc: 0.9602, Val Loss: 0.5064, Val Acc: 0.8584
Epoch 32/99, Train Loss: 0.1095, Train Acc: 0.9624, Val Loss: 0.6120, Val Acc: 0.8319
Epoch 33/99, Train Loss: 0.1096, Train Acc: 0.9690, Val Loss: 0.6218, Val Acc: 0.8053
Epoch 34/99, Train Loss: 0.0894, Train Acc: 0.9646, Val Loss: 0.4840, Val Acc: 0.8673
Epoch 35/99, Train Loss: 0.1467, Train Acc: 0.9314, Val Loss: 0.5605, Val Acc: 0.8761
Epoch 36/99, Train Loss: 0.2331, Train Acc: 0.9447, Val Loss: 0.7342, Val Acc: 0.7876
Epoch 37/99, Train Loss: 0.1630, Train Acc: 0.9336, Val Loss: 0.5327, Val Acc: 0.8496
Epoch 38/99, Train Loss: 0.1293, Train Acc: 0.9624, Val Loss: 1.0636, Val Acc: 0.7434
Epoch 39/99, Train Loss: 0.0954, Train Acc: 0.9646, Val Loss: 0.4450, Val Acc: 0.8938
Epoch 40/99, Train Loss: 0.0402, Train Acc: 0.9912, Val Loss: 0.5242, Val Acc: 0.8407
Epoch 41/99, Train Loss: 0.1280, Train Acc: 0.9624, Val Loss: 0.5269, Val Acc: 0.8319
Epoch 42/99, Train Loss: 0.0788, Train Acc: 0.9779, Val Loss: 0.6632, Val Acc: 0.8319
Epoch 43/99, Train Loss: 0.1128, Train Acc: 0.9668, Val Loss: 0.3365, Val Acc: 0.8761
Epoch 44/99, Train Loss: 0.1162, Train Acc: 0.9646, Val Loss: 0.6866, Val Acc: 0.8142
Epoch 45/99, Train Loss: 0.0266, Train Acc: 0.9956, Val Loss: 0.3973, Val Acc: 0.8850
Epoch 46/99, Train Loss: 0.0931, Train Acc: 0.9690, Val Loss: 0.6352, Val Acc: 0.8319
Epoch 47/99, Train Loss: 0.0777, Train Acc: 0.9735, Val Loss: 0.5743, Val Acc: 0.8496
Epoch 48/99, Train Loss: 0.0473, Train Acc: 0.9889, Val Loss: 0.5463, Val Acc: 0.8319
Epoch 49/99, Train Loss: 0.1480, Train Acc: 0.9535, Val Loss: 1.0142, Val Acc: 0.8407
Epoch 50/99, Train Loss: 0.1329, Train Acc: 0.9513, Val Loss: 0.4691, Val Acc: 0.8673
Epoch 51/99, Train Loss: 0.0330, Train Acc: 0.9867, Val Loss: 0.4812, Val Acc: 0.8496
Epoch 52/99, Train Loss: 0.1050, Train Acc: 0.9535, Val Loss: 0.7743, Val Acc: 0.7788
Epoch 53/99, Train Loss: 0.0767, Train Acc: 0.9735, Val Loss: 0.6740, Val Acc: 0.8142
Epoch 54/99, Train Loss: 0.0483, Train Acc: 0.9779, Val Loss: 0.6069, Val Acc: 0.8230
Epoch 55/99, Train Loss: 0.0923, Train Acc: 0.9757, Val Loss: 0.5565, Val Acc: 0.8673
Epoch 56/99, Train Loss: 0.0940, Train Acc: 0.9690, Val Loss: 0.6511, Val Acc: 0.8230
Epoch 57/99, Train Loss: 0.0310, Train Acc: 0.9867, Val Loss: 0.4568, Val Acc: 0.8496
Epoch 58/99, Train Loss: 0.0073, Train Acc: 1.0000, Val Loss: 0.4516, Val Acc: 0.8496
Epoch 59/99, Train Loss: 0.0033, Train Acc: 1.0000, Val Loss: 0.4458, Val Acc: 0.8850
Epoch 60/99, Train Loss: 0.0055, Train Acc: 0.9978, Val Loss: 0.4935, Val Acc: 0.8584
Epoch 61/99, Train Loss: 0.0030, Train Acc: 1.0000, Val Loss: 0.5033, Val Acc: 0.8496
Epoch 62/99, Train Loss: 0.0098, Train Acc: 0.9956, Val Loss: 0.3741, Val Acc: 0.8673
Epoch 63/99, Train Loss: 0.0201, Train Acc: 0.9889, Val Loss: 0.4065, Val Acc: 0.8584
Epoch 64/99, Train Loss: 0.0158, Train Acc: 0.9956, Val Loss: 0.4000, Val Acc: 0.9027
Epoch 65/99, Train Loss: 0.0077, Train Acc: 0.9978, Val Loss: 0.4236, Val Acc: 0.8761
Epoch 66/99, Train Loss: 0.0034, Train Acc: 1.0000, Val Loss: 0.4047, Val Acc: 0.8938
Epoch 67/99, Train Loss: 0.0099, Train Acc: 0.9978, Val Loss: 0.4296, Val Acc: 0.8673
Epoch 68/99, Train Loss: 0.0170, Train Acc: 0.9956, Val Loss: 0.4366, Val Acc: 0.9115
Epoch 69/99, Train Loss: 0.0578, Train Acc: 0.9867, Val Loss: 0.9006, Val Acc: 0.7699
Epoch 70/99, Train Loss: 0.1552, Train Acc: 0.9624, Val Loss: 1.0190, Val Acc: 0.7522
Epoch 71/99, Train Loss: 0.3006, Train Acc: 0.9071, Val Loss: 1.7312, Val Acc: 0.6726
Epoch 72/99, Train Loss: 0.1259, Train Acc: 0.9535, Val Loss: 0.5290, Val Acc: 0.8496
Epoch 73/99, Train Loss: 0.0361, Train Acc: 0.9845, Val Loss: 0.5585, Val Acc: 0.8319
Epoch 74/99, Train Loss: 0.0485, Train Acc: 0.9779, Val Loss: 0.6037, Val Acc: 0.8407
Epoch 75/99, Train Loss: 0.2948, Train Acc: 0.9049, Val Loss: 1.4896, Val Acc: 0.6726
Epoch 76/99, Train Loss: 0.2515, Train Acc: 0.9270, Val Loss: 1.3241, Val Acc: 0.7080
Epoch 77/99, Train Loss: 0.1719, Train Acc: 0.9403, Val Loss: 0.9907, Val Acc: 0.7876
Epoch 78/99, Train Loss: 0.0785, Train Acc: 0.9779, Val Loss: 0.8646, Val Acc: 0.7699
Epoch 79/99, Train Loss: 0.0347, Train Acc: 0.9889, Val Loss: 0.5678, Val Acc: 0.8407
Epoch 80/99, Train Loss: 0.1509, Train Acc: 0.9447, Val Loss: 0.5656, Val Acc: 0.8142
Epoch 81/99, Train Loss: 0.0736, Train Acc: 0.9779, Val Loss: 0.6753, Val Acc: 0.8053
Epoch 82/99, Train Loss: 0.0637, Train Acc: 0.9823, Val Loss: 0.5300, Val Acc: 0.8584
Epoch 83/99, Train Loss: 0.0454, Train Acc: 0.9757, Val Loss: 0.5306, Val Acc: 0.8584
Epoch 84/99, Train Loss: 0.0407, Train Acc: 0.9889, Val Loss: 0.4931, Val Acc: 0.8407
Epoch 85/99, Train Loss: 0.0100, Train Acc: 1.0000, Val Loss: 0.4908, Val Acc: 0.8673
Epoch 86/99, Train Loss: 0.0071, Train Acc: 0.9978, Val Loss: 0.4836, Val Acc: 0.8761
Epoch 87/99, Train Loss: 0.0094, Train Acc: 0.9978, Val Loss: 0.4489, Val Acc: 0.8761
Epoch 88/99, Train Loss: 0.0033, Train Acc: 1.0000, Val Loss: 0.4582, Val Acc: 0.8761
Epoch 89/99, Train Loss: 0.0015, Train Acc: 1.0000, Val Loss: 0.4960, Val Acc: 0.8761
Epoch 90/99, Train Loss: 0.0027, Train Acc: 1.0000, Val Loss: 0.5174, Val Acc: 0.8584
Epoch 91/99, Train Loss: 0.0086, Train Acc: 0.9978, Val Loss: 0.5599, Val Acc: 0.8319
Epoch 92/99, Train Loss: 0.0074, Train Acc: 0.9978, Val Loss: 0.4926, Val Acc: 0.8673
Epoch 93/99, Train Loss: 0.0052, Train Acc: 1.0000, Val Loss: 0.4914, Val Acc: 0.8407
Epoch 94/99, Train Loss: 0.0025, Train Acc: 1.0000, Val Loss: 0.5375, Val Acc: 0.8584
Epoch 95/99, Train Loss: 0.0013, Train Acc: 1.0000, Val Loss: 0.5106, Val Acc: 0.8761
Epoch 96/99, Train Loss: 0.0011, Train Acc: 1.0000, Val Loss: 0.4826, Val Acc: 0.8584
Epoch 97/99, Train Loss: 0.0088, Train Acc: 0.9978, Val Loss: 0.4799, Val Acc: 0.8584
Epoch 98/99, Train Loss: 0.1045, Train Acc: 0.9535, Val Loss: 0.7483, Val Acc: 0.8230
Epoch 99/99, Train Loss: 0.1666, Train Acc: 0.9425, Val Loss: 0.8003, Val Acc: 0.8319
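The history lists (train_losses, val_losses, train_acc, val_acc) collected above are never visualized in this snippet; a minimal plotting sketch using the matplotlib import from the first cell:

# Plot training vs. validation curves from the lists collected during training
epochs_range = range(len(train_losses))
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_losses, label='Train Loss')
plt.plot(epochs_range, val_losses, label='Val Loss')
plt.legend()
plt.title('Loss')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_acc, label='Train Acc')
plt.plot(epochs_range, val_acc, label='Val Acc')
plt.legend()
plt.title('Accuracy')
plt.show()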
# Visualize predictions on one validation batch
model.eval()
plt.figure(figsize=(10, 5))
plt.suptitle("bird")
with torch.no_grad():
    for inputs, labels in val_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = model(inputs)
        _, preds = torch.max(outputs, 1)
        for i in range(len(inputs)):
            ax = plt.subplot(2, 4, i + 1)
            img = inputs[i].cpu().numpy().transpose((1, 2, 0))  # CHW -> HWC for imshow
            plt.imshow(img)
            plt.title(class_names[preds[i]])
            plt.axis("off")
        break
plt.show()
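Beyond this single-batch visualization, it can be worth confirming the restored best weights over the full validation set (a minimal sketch built only on the loaders defined earlier):

# Accuracy of the restored best model on the whole validation set
model.eval()
correct, total = 0, 0
with torch.no_grad():
    for inputs, labels in val_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        preds = model(inputs).argmax(dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print(f'Best-model validation accuracy: {correct / total:.4f}')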


Personal Summary

  • Converted the TensorFlow version of the code to PyTorch
  • Learned about the history of CNNs and the origin of residual networks (see the formula below)
  • Obtained a more accurate model by increasing the number of training epochs
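For reference, the residual connection that both blocks above implement can be written in the notation of the original ResNet paper (He et al., 2015), where $W_s$ is the 1×1 projection that ConvBlock applies when the shortcut must change channels or stride:

$$ y = \mathcal{F}(x, \{W_i\}) + x \qquad \text{(IdentityBlock)} $$
$$ y = \mathcal{F}(x, \{W_i\}) + W_s\,x \qquad \text{(ConvBlock)} $$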
