深入浅出 diffusion(4):pytorch 实现简单 diffusion

 1. 训练和采样流程

 2. 无条件实现

import torch, time, os
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.utils import save_image
import torch.nn.functional as F


class ResidualConvBlock(nn.Module):
    """Standard ResNet-style convolutional block: two (Conv3x3 -> BN -> GELU) stages."""

    def __init__(self, in_channels: int, out_channels: int, is_res: bool = False) -> None:
        super().__init__()
        self.same_channels = in_channels == out_channels
        self.is_res = is_res
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if not self.is_res:
            return self.conv2(self.conv1(x))
        first = self.conv1(x)
        second = self.conv2(first)
        # Pick the residual input that matches the output channel count.
        residual = x if self.same_channels else first
        # 1.414 ~= sqrt(2): keeps the activation variance roughly constant after the add.
        return (residual + second) / 1.414


class UnetDown(nn.Module):
    """Process and downscale (x0.5) the image feature maps."""

    def __init__(self, in_channels, out_channels):
        super(UnetDown, self).__init__()
        self.model = nn.Sequential(
            ResidualConvBlock(in_channels, out_channels),
            nn.MaxPool2d(2),
        )

    def forward(self, x):
        return self.model(x)


class UnetUp(nn.Module):
    """Concatenate the skip connection, then process and upscale (x2) the feature maps."""

    def __init__(self, in_channels, out_channels):
        super(UnetUp, self).__init__()
        self.model = nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, 2, 2),
            ResidualConvBlock(out_channels, out_channels),
            ResidualConvBlock(out_channels, out_channels),
        )

    def forward(self, x, skip):
        return self.model(torch.cat((x, skip), 1))


class EmbedFC(nn.Module):
    """Generic two-layer fully-connected network for embedding scalars/vectors."""

    def __init__(self, input_dim, emb_dim):
        super(EmbedFC, self).__init__()
        self.input_dim = input_dim
        self.model = nn.Sequential(
            nn.Linear(input_dim, emb_dim),
            nn.GELU(),
            nn.Linear(emb_dim, emb_dim),
        )

    def forward(self, x):
        # Flatten everything past the batch dimension before the linear stack.
        return self.model(x.view(-1, self.input_dim))
class Unet(nn.Module):
    """Noise-prediction U-Net for 28x28 single-channel images.

    Given a noised image and its normalized diffusion step, predicts the
    Gaussian noise that was added to produce that image.
    """

    def __init__(self, in_channels, n_feat=256):
        super(Unet, self).__init__()
        self.in_channels = in_channels
        self.n_feat = n_feat

        self.init_conv = ResidualConvBlock(in_channels, n_feat, is_res=True)
        self.down1 = UnetDown(n_feat, n_feat)
        self.down2 = UnetDown(n_feat, 2 * n_feat)
        # 28x28 -> 14x14 -> 7x7; AvgPool2d(7) then collapses to a 1x1 bottleneck vector.
        self.to_vec = nn.Sequential(nn.AvgPool2d(7), nn.GELU())

        # Embeddings of the scalar time step, one per decoder stage.
        self.timeembed1 = EmbedFC(1, 2 * n_feat)
        self.timeembed2 = EmbedFC(1, 1 * n_feat)

        self.up0 = nn.Sequential(
            nn.ConvTranspose2d(2 * n_feat, 2 * n_feat, 7, 7),
            nn.GroupNorm(8, 2 * n_feat),
            nn.ReLU(),
        )
        self.up1 = UnetUp(4 * n_feat, n_feat)
        self.up2 = UnetUp(2 * n_feat, n_feat)
        self.out = nn.Sequential(
            nn.Conv2d(2 * n_feat, n_feat, 3, 1, 1),
            nn.GroupNorm(8, n_feat),
            nn.ReLU(),
            nn.Conv2d(n_feat, self.in_channels, 3, 1, 1),
        )

    def forward(self, x, t):
        """Predict the noise added to ``x`` at normalized step ``t``.

        :param x: noised image batch, shape (B, in_channels, 28, 28)
        :param t: normalized step in (0, 1], broadcastable to (B, 1)
        :return: predicted noise, same shape as ``x``
        """
        x = self.init_conv(x)
        down1 = self.down1(x)
        down2 = self.down2(down1)
        hiddenvec = self.to_vec(down2)

        # Embed the time step once per decoder stage.
        temb1 = self.timeembed1(t).view(-1, self.n_feat * 2, 1, 1)
        temb2 = self.timeembed2(t).view(-1, self.n_feat, 1, 1)

        # Add the step embedding to each upsampling stage's input.
        up1 = self.up0(hiddenvec)
        up2 = self.up1(up1 + temb1, down2)
        up3 = self.up2(up2 + temb2, down1)
        return self.out(torch.cat((up3, x), 1))


class DDPM(nn.Module):
    """Denoising diffusion wrapper: training loss (``forward``) and ancestral sampling."""

    def __init__(self, model, betas, n_T, device):
        super(DDPM, self).__init__()
        self.model = model.to(device)
        # register_buffer caches the schedule so it moves with .to()/.cuda()
        # and is persisted in state_dict, instead of being recomputed each step.
        for k, v in self.ddpm_schedules(betas[0], betas[1], n_T).items():
            self.register_buffer(k, v)
        self.n_T = n_T
        self.device = device
        self.loss_mse = nn.MSELoss()

    def ddpm_schedules(self, beta1, beta2, T):
        """Precompute the linear beta schedule and derived alpha terms for steps 0..T.

        :param beta1: lower bound of beta  (fixed: original docstring labeled both bounds as lower)
        :param beta2: upper bound of beta
        :param T: total number of diffusion steps
        :return: dict of 1-D tensors of length T+1, indexed by step
        """
        # Raise instead of assert: asserts are stripped under ``python -O``.
        if not 0.0 < beta1 < beta2 < 1.0:
            raise ValueError("betas must satisfy 0 < beta1 < beta2 < 1")
        # Betas linearly spaced from beta1 to beta2 over steps 0..T.
        beta_t = (beta2 - beta1) * torch.arange(0, T + 1, dtype=torch.float32) / T + beta1
        sqrt_beta_t = torch.sqrt(beta_t)
        alpha_t = 1 - beta_t
        log_alpha_t = torch.log(alpha_t)
        # Cumulative product of alphas, computed in log space for numerical stability.
        alphabar_t = torch.cumsum(log_alpha_t, dim=0).exp()
        sqrtab = torch.sqrt(alphabar_t)
        oneover_sqrta = 1 / torch.sqrt(alpha_t)
        sqrtmab = torch.sqrt(1 - alphabar_t)
        mab_over_sqrtmab_inv = (1 - alpha_t) / sqrtmab
        return {
            "alpha_t": alpha_t,              # \alpha_t
            "oneover_sqrta": oneover_sqrta,  # 1/\sqrt{\alpha_t}
            "sqrt_beta_t": sqrt_beta_t,      # \sqrt{\beta_t}
            "alphabar_t": alphabar_t,        # \bar{\alpha}_t
            "sqrtab": sqrtab,                # \sqrt{\bar{\alpha}_t}: scale of x_0 in q(x_t|x_0)
            "sqrtmab": sqrtmab,              # \sqrt{1-\bar{\alpha}_t}: scale of noise in q(x_t|x_0)
            "mab_over_sqrtmab": mab_over_sqrtmab_inv,  # (1-\alpha_t)/\sqrt{1-\bar{\alpha}_t}
        }

    def forward(self, x):
        """Training objective: pick a random step and noise, return the noise-prediction MSE."""
        # t ~ Uniform{1..n_T}
        _ts = torch.randint(1, self.n_T + 1, (x.shape[0],)).to(self.device)
        # eps ~ N(0, 1)
        noise = torch.randn_like(x)
        # Noised image x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps
        x_t = (
            self.sqrtab[_ts, None, None, None] * x
            + self.sqrtmab[_ts, None, None, None] * noise
        )
        # Compare the model's predicted noise at step t with the true noise.
        return self.loss_mse(noise, self.model(x_t, _ts / self.n_T))

    def sample(self, n_sample, size, device):
        """Ancestral sampling: start from pure noise and denoise from step n_T down to 1."""
        x_i = torch.randn(n_sample, *size).to(device)  # x_T ~ N(0, 1)
        for i in range(self.n_T, 0, -1):
            t_is = torch.tensor([i / self.n_T]).to(device)
            t_is = t_is.repeat(n_sample, 1, 1, 1)
            # No fresh noise is added on the final step.
            z = torch.randn(n_sample, *size).to(device) if i > 1 else 0
            eps = self.model(x_i, t_is)
            # Removed dead code: the original re-sliced x_i[:n_sample] every
            # iteration, which is a no-op since x_i already has n_sample rows.
            # x_{t-1} = (x_t - eps*(1-a_t)/sqrt(1-abar_t)) / sqrt(a_t) + sqrt(b_t)*z
            x_i = self.oneover_sqrta[i] * (x_i - eps * self.mab_over_sqrtmab[i]) + self.sqrt_beta_t[i] * z
        return x_i


class ImageGenerator(object):
    """Trains an unconditional DDPM on MNIST and saves a sampled image grid per epoch."""

    def __init__(self):
        # Hyperparameters, dataset, and network setup.
        self.epoch = 20
        self.sample_num = 100
        self.batch_size = 256
        self.lr = 0.0001
        self.n_T = 400
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.init_dataloader()
        self.sampler = DDPM(model=Unet(in_channels=1), betas=(1e-4, 0.02),
                            n_T=self.n_T, device=self.device).to(self.device)
        self.optimizer = optim.Adam(self.sampler.model.parameters(), lr=self.lr)

    def init_dataloader(self):
        """Build the MNIST train/val dataloaders (downloads the dataset if missing)."""
        tf = transforms.Compose([transforms.ToTensor()])
        train_dataset = MNIST('./data/', train=True, download=True, transform=tf)
        self.train_dataloader = DataLoader(train_dataset, batch_size=self.batch_size,
                                           shuffle=True, drop_last=True)
        val_dataset = MNIST('./data/', train=False, download=True, transform=tf)
        self.val_dataloader = DataLoader(val_dataset, batch_size=self.batch_size,
                                         shuffle=False)

    def train(self):
        """Run the training loop; labels are unused (unconditional model)."""
        self.sampler.train()
        print('训练开始!!')
        for epoch in range(self.epoch):
            self.sampler.model.train()
            loss_mean = 0
            for i, (images, labels) in enumerate(self.train_dataloader):
                # Labels are ignored; only the images are needed (removed the
                # pointless labels.to(device) transfer from the original).
                images = images.to(self.device)
                loss = self.sampler(images)
                loss_mean += loss.item()
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            train_loss = loss_mean / len(self.train_dataloader)
            print('epoch:{}, loss:{:.4f}'.format(epoch, train_loss))
            self.visualize_results(epoch)

    @torch.no_grad()
    def visualize_results(self, epoch):
        """Sample ``sample_num`` images and save them as a square grid."""
        self.sampler.eval()
        output_path = 'results/Diffusion'
        # exist_ok avoids the check-then-create race of the original exists() test.
        os.makedirs(output_path, exist_ok=True)
        tot_num_samples = self.sample_num
        image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
        out = self.sampler.sample(tot_num_samples, (1, 28, 28), self.device)
        save_image(out, os.path.join(output_path, '{}.jpg'.format(epoch)),
                   nrow=image_frame_dim)


if __name__ == '__main__':
    generator = ImageGenerator()
    generator.train()

3. 有条件实现

import torch, time, os
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.utils import save_image
import torch.nn.functional as F


class ResidualConvBlock(nn.Module):
    """ResNet-style block built from two (Conv3x3 -> BatchNorm -> GELU) stages."""

    def __init__(self, in_channels: int, out_channels: int, is_res: bool = False) -> None:
        super().__init__()
        self.same_channels = in_channels == out_channels
        self.is_res = is_res
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        stage1 = self.conv1(x)
        stage2 = self.conv2(stage1)
        if not self.is_res:
            return stage2
        # Use the input as the residual when channels match, otherwise the
        # first stage's output (which already has the target channel count).
        shortcut = x if self.same_channels else stage1
        # Divide by sqrt(2) so the summed activations keep roughly unit variance.
        return (shortcut + stage2) / 1.414


class UnetDown(nn.Module):
    """Encoder stage: residual conv followed by 2x max-pool downsampling."""

    def __init__(self, in_channels, out_channels):
        super(UnetDown, self).__init__()
        self.model = nn.Sequential(
            ResidualConvBlock(in_channels, out_channels),
            nn.MaxPool2d(2),
        )

    def forward(self, x):
        return self.model(x)


class UnetUp(nn.Module):
    """Decoder stage: concat skip connection, 2x transposed-conv upsample, two residual convs."""

    def __init__(self, in_channels, out_channels):
        super(UnetUp, self).__init__()
        self.model = nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, 2, 2),
            ResidualConvBlock(out_channels, out_channels),
            ResidualConvBlock(out_channels, out_channels),
        )

    def forward(self, x, skip):
        merged = torch.cat((x, skip), 1)
        return self.model(merged)


class EmbedFC(nn.Module):
    """Two-layer MLP used to embed conditioning inputs (time step, class vector)."""

    def __init__(self, input_dim, emb_dim):
        super(EmbedFC, self).__init__()
        self.input_dim = input_dim
        self.model = nn.Sequential(
            nn.Linear(input_dim, emb_dim),
            nn.GELU(),
            nn.Linear(emb_dim, emb_dim),
        )

    def forward(self, x):
        flattened = x.view(-1, self.input_dim)
        return self.model(flattened)
class Unet(nn.Module):
    """Class-conditional noise-prediction U-Net for 28x28 single-channel images.

    Given a noised image, a condition vector (one-hot class label), and the
    normalized diffusion step, predicts the Gaussian noise that was added.
    """

    def __init__(self, in_channels, n_feat=256, n_classes=10):
        super(Unet, self).__init__()
        self.in_channels = in_channels
        self.n_feat = n_feat

        self.init_conv = ResidualConvBlock(in_channels, n_feat, is_res=True)
        self.down1 = UnetDown(n_feat, n_feat)
        self.down2 = UnetDown(n_feat, 2 * n_feat)
        # 28x28 -> 14x14 -> 7x7; AvgPool2d(7) then collapses to a 1x1 bottleneck vector.
        self.to_vec = nn.Sequential(nn.AvgPool2d(7), nn.GELU())

        # Step and condition embeddings, one pair per decoder stage.
        self.timeembed1 = EmbedFC(1, 2 * n_feat)
        self.timeembed2 = EmbedFC(1, 1 * n_feat)
        self.conditionembed1 = EmbedFC(n_classes, 2 * n_feat)
        self.conditionembed2 = EmbedFC(n_classes, 1 * n_feat)

        self.up0 = nn.Sequential(
            nn.ConvTranspose2d(2 * n_feat, 2 * n_feat, 7, 7),
            nn.GroupNorm(8, 2 * n_feat),
            nn.ReLU(),
        )
        self.up1 = UnetUp(4 * n_feat, n_feat)
        self.up2 = UnetUp(2 * n_feat, n_feat)
        self.out = nn.Sequential(
            nn.Conv2d(2 * n_feat, n_feat, 3, 1, 1),
            nn.GroupNorm(8, n_feat),
            nn.ReLU(),
            nn.Conv2d(n_feat, self.in_channels, 3, 1, 1),
        )

    def forward(self, x, c, t):
        """Predict the noise added to ``x`` at normalized step ``t`` under condition ``c``.

        :param x: noised image batch, shape (B, in_channels, 28, 28)
        :param c: condition vector (one-hot class), shape (B, n_classes)
        :param t: normalized step in (0, 1], broadcastable to (B, 1)
        :return: predicted noise, same shape as ``x``
        """
        x = self.init_conv(x)
        down1 = self.down1(x)
        down2 = self.down2(down1)
        hiddenvec = self.to_vec(down2)

        temb1 = self.timeembed1(t).view(-1, self.n_feat * 2, 1, 1)
        temb2 = self.timeembed2(t).view(-1, self.n_feat, 1, 1)
        cemb1 = self.conditionembed1(c).view(-1, self.n_feat * 2, 1, 1)
        cemb2 = self.conditionembed2(c).view(-1, self.n_feat, 1, 1)

        # Condition scales the features multiplicatively, the step embedding
        # is added, and the result feeds the next upsampling stage.
        up1 = self.up0(hiddenvec)
        up2 = self.up1(cemb1 * up1 + temb1, down2)
        up3 = self.up2(cemb2 * up2 + temb2, down1)
        return self.out(torch.cat((up3, x), 1))


class DDPM(nn.Module):
    """Conditional DDPM wrapper: training loss (``forward``) and ancestral sampling."""

    def __init__(self, model, betas, n_T, device):
        super(DDPM, self).__init__()
        self.model = model.to(device)
        # register_buffer caches the schedule so it moves with .to()/.cuda()
        # and is persisted in state_dict, instead of being recomputed each step.
        for k, v in self.ddpm_schedules(betas[0], betas[1], n_T).items():
            self.register_buffer(k, v)
        self.n_T = n_T
        self.device = device
        self.loss_mse = nn.MSELoss()

    def ddpm_schedules(self, beta1, beta2, T):
        """Precompute the linear beta schedule and derived alpha terms for steps 0..T.

        :param beta1: lower bound of beta  (fixed: original docstring labeled both bounds as lower)
        :param beta2: upper bound of beta
        :param T: total number of diffusion steps
        :return: dict of 1-D tensors of length T+1, indexed by step
        """
        # Raise instead of assert: asserts are stripped under ``python -O``.
        if not 0.0 < beta1 < beta2 < 1.0:
            raise ValueError("betas must satisfy 0 < beta1 < beta2 < 1")
        # Betas linearly spaced from beta1 to beta2 over steps 0..T.
        beta_t = (beta2 - beta1) * torch.arange(0, T + 1, dtype=torch.float32) / T + beta1
        sqrt_beta_t = torch.sqrt(beta_t)
        alpha_t = 1 - beta_t
        log_alpha_t = torch.log(alpha_t)
        # Cumulative product of alphas, computed in log space for numerical stability.
        alphabar_t = torch.cumsum(log_alpha_t, dim=0).exp()
        sqrtab = torch.sqrt(alphabar_t)
        oneover_sqrta = 1 / torch.sqrt(alpha_t)
        sqrtmab = torch.sqrt(1 - alphabar_t)
        mab_over_sqrtmab_inv = (1 - alpha_t) / sqrtmab
        return {
            "alpha_t": alpha_t,              # \alpha_t
            "oneover_sqrta": oneover_sqrta,  # 1/\sqrt{\alpha_t}
            "sqrt_beta_t": sqrt_beta_t,      # \sqrt{\beta_t}
            "alphabar_t": alphabar_t,        # \bar{\alpha}_t
            "sqrtab": sqrtab,                # \sqrt{\bar{\alpha}_t}: scale of x_0 in q(x_t|x_0)
            "sqrtmab": sqrtmab,              # \sqrt{1-\bar{\alpha}_t}: scale of noise in q(x_t|x_0)
            "mab_over_sqrtmab": mab_over_sqrtmab_inv,  # (1-\alpha_t)/\sqrt{1-\bar{\alpha}_t}
        }

    def forward(self, x, c):
        """Training objective: pick a random step and noise, return the noise-prediction MSE."""
        # t ~ Uniform{1..n_T}
        _ts = torch.randint(1, self.n_T + 1, (x.shape[0],)).to(self.device)
        # eps ~ N(0, 1)
        noise = torch.randn_like(x)
        # Noised image x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps
        x_t = (
            self.sqrtab[_ts, None, None, None] * x
            + self.sqrtmab[_ts, None, None, None] * noise
        )
        # Compare the model's predicted noise at step t with the true noise.
        return self.loss_mse(noise, self.model(x_t, c, _ts / self.n_T))

    def sample(self, n_sample, c, size, device):
        """Ancestral sampling conditioned on ``c``, from step n_T down to 1."""
        x_i = torch.randn(n_sample, *size).to(device)  # x_T ~ N(0, 1)
        for i in range(self.n_T, 0, -1):
            t_is = torch.tensor([i / self.n_T]).to(device)
            t_is = t_is.repeat(n_sample, 1, 1, 1)
            # No fresh noise is added on the final step.
            z = torch.randn(n_sample, *size).to(device) if i > 1 else 0
            eps = self.model(x_i, c, t_is)
            # Removed dead code: the original re-sliced x_i[:n_sample] every
            # iteration, which is a no-op since x_i already has n_sample rows.
            # x_{t-1} = (x_t - eps*(1-a_t)/sqrt(1-abar_t)) / sqrt(a_t) + sqrt(b_t)*z
            x_i = self.oneover_sqrta[i] * (x_i - eps * self.mab_over_sqrtmab[i]) + self.sqrt_beta_t[i] * z
        return x_i


class ImageGenerator(object):
    """Trains a class-conditional DDPM on MNIST and saves a sampled grid per epoch."""

    def __init__(self):
        # Hyperparameters, dataset, and network setup.
        self.epoch = 20
        self.sample_num = 100
        self.batch_size = 256
        self.lr = 0.0001
        self.n_T = 400
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.init_dataloader()
        self.sampler = DDPM(model=Unet(in_channels=1), betas=(1e-4, 0.02),
                            n_T=self.n_T, device=self.device).to(self.device)
        self.optimizer = optim.Adam(self.sampler.model.parameters(), lr=self.lr)

    def init_dataloader(self):
        """Build the MNIST train/val dataloaders (downloads the dataset if missing)."""
        tf = transforms.Compose([transforms.ToTensor()])
        train_dataset = MNIST('./data/', train=True, download=True, transform=tf)
        self.train_dataloader = DataLoader(train_dataset, batch_size=self.batch_size,
                                           shuffle=True, drop_last=True)
        val_dataset = MNIST('./data/', train=False, download=True, transform=tf)
        self.val_dataloader = DataLoader(val_dataset, batch_size=self.batch_size,
                                         shuffle=False)

    def train(self):
        """Run the training loop; labels are one-hot encoded as the condition vector."""
        self.sampler.train()
        print('训练开始!!')
        for epoch in range(self.epoch):
            self.sampler.model.train()
            loss_mean = 0
            for i, (images, labels) in enumerate(self.train_dataloader):
                images, labels = images.to(self.device), labels.to(self.device)
                labels = F.one_hot(labels, num_classes=10).float()
                loss = self.sampler(images, labels)
                loss_mean += loss.item()
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            train_loss = loss_mean / len(self.train_dataloader)
            print('epoch:{}, loss:{:.4f}'.format(epoch, train_loss))
            self.visualize_results(epoch)

    @torch.no_grad()
    def visualize_results(self, epoch):
        """Sample 10 images per digit class (10x10 grid) and save them."""
        self.sampler.eval()
        output_path = 'results/Diffusion'
        # exist_ok avoids the check-then-create race of the original exists() test.
        os.makedirs(output_path, exist_ok=True)
        tot_num_samples = self.sample_num
        image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
        # Condition: digits 0..9, each repeated 10 times, one-hot encoded.
        class_ids = torch.as_tensor(np.repeat(np.arange(10), 10), dtype=torch.int64)
        labels = F.one_hot(class_ids, num_classes=10).to(self.device).float()
        out = self.sampler.sample(tot_num_samples, labels, (1, 28, 28), self.device)
        save_image(out, os.path.join(output_path, '{}.jpg'.format(epoch)),
                   nrow=image_frame_dim)


if __name__ == '__main__':
    generator = ImageGenerator()
    generator.train()

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.rhkb.cn/news/247798.html

如若内容造成侵权/违法违规/事实不符,请联系长河编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

基于Redis的高可用分布式锁——RedLock

目录 RedLock简介 RedLock工作流程 获取锁 释放锁 RedLock简介 Redis作者提出来的高可用分布式锁,由多个完全独立的Redis节点组成,注意是完全独立,而不是主从关系或者集群关系,并且一般是要求分开机器部署的。利用分布式高可用系统中大多数存…

基于ncurse的floppy_bird小游戏

1. 需求分析 将运动分解为鸟的垂直运动和杆的左右运动。 2. 概要设计 2.1 鸟运动部分 2.2 杆的运动 3. 代码实现 #include <stdio.h> #include <ncurses.h>#include <stdlib.h> #include <time.h>int vx 0; int vy 1;int bird_r; int bird_c;int…

2023年算法CDO-CNN-BiLSTM-ATTENTION回归预测(matlab)

2023年算法CDO-CNN-BiLSTM-ATTENTION回归预测&#xff08;matlab&#xff09; CDO-CNN-BiLSTM-Attention切诺贝利灾难优化器优化卷积-长短期记忆神经网络结合注意力机制的数据回归预测 Matlab语言。 切诺贝利灾难优化器Chernobyl Disaster Optimizer (CDO)是H. Shehadeh于202…

力扣题集(第一弹)

一日练,一日功;一日不练十日空。 学编程离不开刷题,接下来让我们来看几个力扣上的题目。 1. 242. 有效的字母异位词 题目描述 给定两个字符串 s 和 t,编写一个函数来判断 t 是否是 s 的字母异位词。 注意:若 s 和 t 中每个字符出现的次数…

详解OpenHarmony各部分文件在XR806上的编译顺序

大家好&#xff0c;今天我们来谈一谈编程时一个很有趣的话题——编译顺序。我知道&#xff0c;一提到编译可能大家会感到有点儿头疼&#xff0c;但请放心&#xff0c;我不会让大家头疼的。我们要明白&#xff0c;在开始写代码之前&#xff0c;了解整个程序的编译路径是十分有必…

[晓理紫]每日论文分享(有中文摘要,源码或项目地址)--大模型、扩散模型、视觉语言导航

专属领域论文订阅 VX 关注{晓理紫}&#xff0c;每日更新论文&#xff0c;如感兴趣&#xff0c;请转发给有需要的同学&#xff0c;谢谢支持 如果你感觉对你有所帮助&#xff0c;请关注我&#xff0c;每日准时为你推送最新论文。 为了答谢各位网友的支持&#xff0c;从今日起免费…

【国产MCU】-认识CH32V307及开发环境搭建

认识CH32V307及开发环境搭建 文章目录 认识CH32V307及开发环境搭建1、CH32V307介绍2、开发环境搭建3、程序固件下载1、CH32V307介绍 CH32V307是沁恒推出的一款基于32位RISC-V设计的互联型微控制器,配备了硬件堆栈区、快速中断入口,在标准RISC-V基础上大大提高了中断响应速度…

Unity3d实现简单的战斗

使用u3d实现一个简单的战斗demo&#xff0c;记下学到的知识点&#xff0c;以备后查。 1.判断鼠标是否点中制定物体 if (Input.GetMouseButton(0)) {Ray ray Camera.main.ScreenPointToRay(Input.mousePosition);if (Physics.Raycast(ray, out RaycastHit hit)){//坐标转换Ve…

Docker 安装篇(Ubuntu)

图省事一般采用第一种 一、 直接采用apt安装 apt install docker.io查看 /usr/lib/systemd/system/docker.service ubuntu默认守护进程用的&#xff1a;fd:// ps -ef | grep docker root 775237 1 0 11:14 ? 00:01:07 /usr/bin/dockerd -H fd:// --cont…

Python qt.qpa.xcb: could not connect to display解决办法

遇到问题:qt.qpa.xcb: could not connect to display 解决办法,在命令行输入: export DISPLAY=:0 然后重新跑python程序,解决! 参考博客:qt.qpa.xcb: could not connect to display qt.qpa.plugin: Could …

Mysql-事务(隔离级别,事务底层原理,MVCC)

什么是事务&#xff1f;有哪些特性&#xff1f; 事务&#xff1a;事务指的是逻辑上的一组操作&#xff0c;组成这组操作的各个单元要么全都成功&#xff0c;要么全都失败。 事务特性&#xff1a; 原子性&#xff08;Atomicity&#xff09;&#xff1a; 原子性是指事务是一个不…

window下如何安装ffmpeg(跨平台多媒体处理工具)

ffmpeg是什么? FFmpeg是一个开源的跨平台多媒体处理工具&#xff0c;可以用于录制、转换和流媒体处理音视频。它包含了几个核心库和工具&#xff0c;可以在命令行下执行各种音视频处理操作&#xff0c;如剪辑、分割、合并、媒体格式转换、编解码、流媒体传输等。FFmpeg支持多…

探索Java中最常用的框架:Spring、Spring MVC、Spring Boot、MyBatis和Netty

目录 前言 Spring框架 Spring MVC框架 Spring Boot框架 MyBatis框架 Netty框架 结语 作者简介&#xff1a; 懒大王敲代码&#xff0c;计算机专业应届生 今天给大家聊聊探索Java中最常用的框架&#xff1a;Spring、Spring MVC、Spring Boot、MyBatis和Netty&#xff0c;希…

解锁Web3:数字未来的大门

随着科技的不断推进&#xff0c;我们正站在数字时代的新门槛上。Web3&#xff0c;作为互联网的下一个演进阶段&#xff0c;正在逐渐揭开数字未来的面纱。本文将深入探讨Web3的本质、对社会的影响以及在数字时代中所扮演的关键角色。 什么是Web3&#xff1f; Web3是互联网发展的…

Mysql 更新数据

MySQL中使用UPDATE语句更新表中的记录&#xff0c;可以更新特定的行或者同时更新所有的行。基本语法结构如下&#xff1a; UPDATE table_name SET column_name1 value1,column_name2 value2,……, column_namen valuen WHERE(condition); column_name1,column_name2,……,…

嵌入式学习 Day13

一. 指针总结 1.指针概念 a.指针 --- 地址 ---内存单元编号 b.指针 --- 数据类型 ---指针类型 不同语境: 定义一个指针 //指针类型的变量 打印某个变量的指针 //指针 --地址 2.指针变量的定义 基类型 * 变量名 a.基类型 …

Python爬虫解析库安装

解析库的安装 抓取网页代码之后&#xff0c;下一步就是从网页中提取信息。提取信息的方式有多种多样&#xff0c;可以使用正则来提取&#xff0c;但是写起来相对比较烦琐。这里还有许多强大的解析库&#xff0c;如 lxml、Beautiful Soup、pyquery 等。此外&#xff0c;还提供了…

【C/C++ 02】希尔排序

希尔排序虽然是直接插入排序的升级版本&#xff0c;和插入排序有着相同的特性&#xff0c;即原始数组有序度越高则算法的时间复杂度越低&#xff08;预排序机制&#xff09;&#xff0c;但是是不稳定排序算法。 为了降低算法的时间复杂度&#xff0c;所以我们需要在排序之前尽…

美化背景(拼图小游戏)

package Puzzlegame.com.wxj.ui;import javax.swing.*; import javax.swing.border.BevelBorder; import java.util.Random;public class GameJframe extends JFrame { //游戏主界面 //创建一个二维数组//目的&#xff1a;管理数据//加载图片的时候&#xff0c;会根据二维数组中…

BabylonJS 6.0文档 Deep Dive 摄像机(六):遮罩层和多相机纹理

1. 使用遮罩层来处理多个摄影机和多网格物体 LayerMask是分配给每个网格&#xff08;Mesh&#xff09;和摄像机&#xff08;Camera&#xff09;的一个数。它用于位&#xff08;bit&#xff09;级别用来指示灯光和摄影机是否应照射或显示网格物体。默认值为0x0FFFFFFF&#xff…