Install Anaconda
pip list shows which packages are installed
nvidia-smi shows the status of the GPU
Install PyTorch
Verify that PyTorch installed correctly and that it can use the GPU.
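A minimal check, run in the Python interpreter of the new environment (the printed values depend on your install):

import torch

print(torch.__version__)          # the installed PyTorch version
print(torch.cuda.is_available())  # True means PyTorch can use the GPU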
(1) Check the conda version
conda --version or conda -V
(2) Update conda (update conda itself to the latest version)
conda update conda
(3) Create a virtual environment
conda create -n res python=3.7
(4) Activate a virtual environment
conda activate res
(5) Deactivate the virtual environment
conda deactivate
(6) Delete a virtual environment
conda remove -n env_name --all or conda remove --name env_name --all
(7) List all virtual environments
conda env list or conda info --envs
(8) List all packages in the current environment
conda list
(9) Install a third-party package
conda install dill or pip install dill
(10) Uninstall a third-party package
conda uninstall dill or pip uninstall dill
dir(): shows what is inside a toolbox and each of its compartments, i.e. which attributes and submodules a package or object contains
help(): shows how each tool is used, i.e. the documentation of a function or object
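A quick sketch of both functions applied to the torch package (the exact output depends on your install):

import torch

print(dir(torch))              # list everything in the torch "toolbox", e.g. 'cuda', 'nn', ...
print(dir(torch.cuda))         # open one compartment and list its contents
help(torch.cuda.is_available)  # show how this particular tool is used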
The Dataset class
import os
from torch.utils.data import Dataset
import torchvision
from PIL import Image

'''
demo1: open an image with PIL
On Windows, every \ in an absolute path must be escaped as \\, otherwise you get an error like:
OSError: [Errno 22] Invalid argument: 'D:\\pycharm download\torch\\dataset\train\x07nts\x013035.jpg'
'''
# img_path = "D:\\pycharm download\\torch\\dataset\\train\\ants\\0013035.jpg"
# img = Image.open(img_path)
# img.show()

'''
demo2: list a directory of images with os.listdir
Output:
['0013035.jpg', '1030023514_aad5c608f9.jpg', '1095476100_3906d8afde.jpg', '1099452230_d1949d3250.jpg', '116570827_e9c126745d.jpg', '1225872729_6f0856588f.jpg', '1262877379_64fcada201.jpg', '1269756697_0bce92cdab.jpg', '1286984635_5119e80de1.jpg',
'''
# dir_path = "dataset/train/ants"
# img_path_list = os.listdir(dir_path)
# print(img_path_list)

'''
demo3: implement your own Dataset class
A subclass must implement/override the interface of its parent class;
subclassing Dataset means implementing/overriding __init__ and __getitem__.
'''
# class MyData(Dataset):
#
#     def __init__(self):
#         pass
#
#     def __getitem__(self, item):
#         pass

class MyData(Dataset):

    def __init__(self, root_dir, label_dir):
        # self turns root_dir into a variable that every method in the class can use
        # e.g. root_dir = dataset/train
        self.root_dir = root_dir
        # e.g. label_dir = ants
        self.label_dir = label_dir
        self.path = os.path.join(self.root_dir, self.label_dir)
        # if label_dir = ants, then path = dataset/train/ants
        # os.listdir lists the names of all ant images, e.g. 0013035.jpg
        # img_path is a list
        self.img_path = os.listdir(self.path)

    def __getitem__(self, idx):
        """
        Indexing a MyData object lands here automatically; double-underscore methods are Python magic methods.
        :param idx:
        :return:
        """
        img_name = self.img_path[idx]
        # full path of the image with this name, e.g. 0013035.jpg
        img_item_path = os.path.join(self.root_dir, self.label_dir, img_name)
        img = Image.open(img_item_path)
        label = self.label_dir
        return img, label

    # another magic method that returns the length of the dataset
    def __len__(self):
        return len(self.img_path)

'''
demo4: build the ants dataset
'''
root_dir = "./dataset/train"
label_dir = "ants"
ants_dataset = MyData(root_dir, label_dir)
# print(ants_dataset[0])
# # output: (<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=768x512 at 0x28C66665EE0>, 'ants')
# img, label = ants_dataset[0]
# img.show()
# print(label)

'''
demo5: build the bees dataset the same way
'''
root_dir = "./dataset/train"
label_dir = "bees"
bees_dataset = MyData(root_dir, label_dir)

'''
demo6: concatenate two datasets
'''
train_dataset = ants_dataset + bees_dataset
Using TensorBoard to display data
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from tu_demo1 import MyData

'''
demo1: introduction to SummaryWriter
Writes entries directly to event files in the log_dir to be consumed by TensorBoard.
SummaryWriter writes entries into a directory; TensorBoard reads that directory and turns them into charts.
In everyday use SummaryWriter takes just one argument, log_dir.
Example: pass the log_dir argument
    writer = SummaryWriter("my_experiment")
    # folder location: my_experiment (often simply called logs)
The comment argument appends a suffix to the default log_dir:
    writer = SummaryWriter(comment="LR_0.1_BATCH_16")
    # folder location: runs/May04_22-14-54_s-MacBook-Pro.localLR_0.1_BATCH_16/
    # runs/May04_22-14-54_s-MacBook-Pro.local is the default directory
'''
# writer = SummaryWriter("logs")
# writer.add_image()
# writer.add_scalar()
# writer.close()

# tensorboard --logdir=logs --port=6007  # opens the SummaryWriter directory on the given port

'''
demo2: writing scalar data
tag (string): Data identifier (chart title)
scalar_value (float or string/blobname): Value to save (y axis)
global_step (int): Global step value to record (x axis)
'''
# writer = SummaryWriter("logs")
#
# for i in range(100):
#     writer.add_scalar("y=x summary", i, i)
#     writer.add_scalar("y=2x summary", 2*i, i)
#
# writer.close()

'''
demo3: writing image data
tag (string): Data identifier
img_tensor (torch.Tensor, numpy.array, or string/blobname): Image data
global_step (int): Global step value to record
The image must be one of the three types above, so here we use numpy to convert it.
Passing the numpy array directly raises an error, because the dimension order does not match what the writer expects:
    TypeError: Cannot handle this data type: (1, 1, 512)
Fix: tell the writer that our dimension order is HWC via dataformats='HWC':
    writer.add_image('my_image_HWC', img_HWC, 0, dataformats='HWC')
Running print(img_array.shape) in the console shows you the order.
'''
# writer = SummaryWriter("logs")
#
# image_path = "dataset/train/ants/0013035.jpg"
# img_pil = Image.open(image_path)
# img_array = np.array(img_pil)
#
# # writer.add_image("img test", img_array, 1)
# writer.add_image("img test", img_array, 1, dataformats='HWC')
# writer.close()

'''
demo4: practice writing images
'''
# get the dataset
root_dir = "./dataset/train"
label_dir = "ants"
ants_dataset = MyData(root_dir, label_dir)
# write to the board
step = 1
writer = SummaryWriter("logs")
for img, label in ants_dataset:
    img_array = np.array(img)
    print(type(img_array), img_array.shape)
    print("writing image {}".format(step))
    writer.add_image("ants images", img_array, step, dataformats='HWC')
    step = step + 1
# always close the writer at the end
writer.close()
# The 117th image has shape (300, 300): it has no third dimension, cannot be written, and raises an error,
# but the board still stores the first 116 images.
# If you have a solution, feel free to contact me.
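One possible fix for that (300, 300) grayscale image: convert each PIL image to RGB before converting it to a numpy array, so every image has three channels and dataformats='HWC' keeps working. A minimal sketch reusing the demo4 loop:

for img, label in ants_dataset:
    if img.mode != "RGB":          # e.g. the grayscale image with shape (300, 300)
        img = img.convert("RGB")   # PIL copies the single channel into R, G and B
    img_array = np.array(img)      # now always (H, W, 3)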
transforms is used to process images
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

'''
demo1: simple use of ToTensor
ToTensor: Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor
example: pil --> tensor
    tensor([[[0.3137, 0.3137, 0.3137, ..., 0.3176, 0.3098, 0.2980],
             [0.3176, 0.3176, 0.3176, ...,
'''
# get a PIL image
img_path = "dataset/train/ants/0013035.jpg"
img = Image.open(img_path)
# create the transform tool we need and give it a name
tensor_trans = transforms.ToTensor()
# use the tool, passing whatever arguments it needs
tensor_img = tensor_trans(img)  # Ctrl+P shows the parameters the function expects
print(tensor_img)

"""
demo2: why do we need the tensor data type?
Because a tensor carries attributes that are essential when computing a neural network:
grad: the gradient
device: the device
is_cuda:
requires_grad: whether to keep the gradient
"""
# tensor_img.grad = 0
# tensor_img.requires_grad = False

'''
demo3: write the tensor to the board
'''
writer = SummaryWriter("logs")
writer.add_image('tensor img', tensor_img, 1)
writer.close()

Then run tensorboard --logdir=logs --port=6007 to open the SummaryWriter directory on the given port.
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

class Person:
    def __call__(self, name):
        print("__call__" + name)

    def hello(self, name):
        print("hello" + name)

person = Person()
person("zhangsan")
person.hello("list")

'''
demo1: magic methods
If an instantiated object can be called with arguments directly, without going through .method_name,
then the object has a corresponding magic method, e.g. __getitem__, __call__, __len__.
'''
img_path = "dataset/train/ants/0013035.jpg"
img = Image.open(img_path)
writer = SummaryWriter('logs')

'''
demo2: how Compose works
Compose specifies a sequence of transform operations, i.e. it defines a processing pipeline.
Example:
    >>> transforms.Compose([
    >>>     transforms.CenterCrop(10),
    >>>     transforms.PILToTensor(),
    >>>     transforms.ConvertImageDtype(torch.float),
    >>> ])
'''

'''
demo3: how Normalize works
Computation: output[channel] = (input[channel] - mean[channel]) / std[channel]
In plain words: take the value at each pixel, subtract the mean, then divide by the standard deviation.
'''
trans_norm = transforms.Normalize([5, 0.5, 0.5], [0.5, 0.5, 0.5])  # RGB has three channels, so three means and three stds are needed
trans_tensor = transforms.ToTensor()
img_tensor = trans_tensor(img)
img_norm = trans_norm(img_tensor)
writer.add_image('norm img', img_norm)
# On the board the normalized image changes a lot, because the first mean was deliberately set very large

'''
demo4: how Resize works
Resize the input image to the given size.
Note: a single int resizes the smaller edge to that size (keeping the aspect ratio); pass (H, W) to force an exact H x W.
Resize does not change the data type of the image.
'''
# img PIL -> img_resize PIL
trans_resize = transforms.Resize((512, 500))
img_resize = trans_resize(img)
# img_resize PIL -> img_resize tensor
img_resize = trans_tensor(img_resize)
print(img_resize)

'''
demo5: RandomCrop
Randomly crop a new image of the given size, repeated ten times.
'''
trans_randomcrop = transforms.RandomCrop(500)
trans_compose = transforms.Compose([
    trans_randomcrop,
    trans_tensor
])
for i in range(10):
    img_crop_tensor = trans_compose(img)
    writer.add_image('crop tensor', img_crop_tensor, i)
writer.close()
Dataset handling and DataLoader
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

'''
demo1: downloading a dataset with torchvision
Each item is (image data, label index); the index maps to a class name.
    (<PIL.Image.Image image mode=RGB size=32x32 at 0x22F00A48F10>, 6)
Classes:
    ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
'''
# train_set = torchvision.datasets.CIFAR10(root="./dataset", train=True, download=True)  # root="./dataset" is the download location
# test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False, download=True)
# print(train_set[0])               # print one sample
# print(train_set.classes)          # print the class names of the dataset
# img, target = train_set[0]        # returns the image img and the class index target
# print(train_set.classes[target])
# img.show()                        # show the image

'''
demo2: applying transforms to the dataset with Compose
'''
dataset_trans = transforms.Compose([
    torchvision.transforms.ToTensor()
])  # only apply ToTensor to the dataset
train_set = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=dataset_trans, download=True)
test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=dataset_trans, download=True)

writer = SummaryWriter('logs')
for i in range(10):
    img, target = train_set[i]
    writer.add_image('test torchvison compose', img, i)
writer.close()
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

'''
demo1: basic use of DataLoader
batch_size: how many samples the loader loads each time, like loading 4 rounds into the chamber or drawing four cards at a time
shuffle: whether to reshuffle the data at every epoch, like shuffling the deck after each round
drop_last: what to do with a final batch that is not full. For example, with 100 images and a batch size of 3,
one image is left over; True drops it, False keeps a final batch of size 1.
'''
train_set = torchvision.datasets.CIFAR10("./dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_set = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)

train_loader = DataLoader(dataset=train_set, batch_size=4, shuffle=True, num_workers=0, drop_last=False)
test_loader = DataLoader(dataset=test_set, batch_size=4, shuffle=True, num_workers=0, drop_last=False)

# the first image of the test set and its target
img, target = test_set[0]
print(img.shape)
print(target)

# visualize with the board
writer = SummaryWriter("logs")
step = 0
for data in test_loader:
    imgs, targets = data
    # print(imgs.shape)  # torch.Size([4, 3, 32, 32]), i.e. 4 images, 3 channels, 32*32
    # print(targets)
    # add_image raises an error here because it writes only one image at a time;
    # use add_images to write a batched tensor
    # writer.add_image("test set loader", imgs, step)
    writer.add_images("test set loader", imgs, step)
    step = step + 1
writer.close()
import torch
from torch import nn

"""
demo1: the bare bones every neural network needs
Two bones form the skeleton: __init__ and forward
"""
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()  # call the parent class's initializer

    def forward(self, input):  # define the computation the network performs
        output = input + 1
        return output

tudui = Tudui()
x = torch.tensor(1.0)
output = tudui(x)
print(output)

"""
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):  # x is the input
        x = F.relu(self.conv1(x))     # x goes through one convolution plus one non-linearity
        return F.relu(self.conv2(x))  # x goes through another convolution plus another non-linearity
"""