Exporting a YOLOv3 ONNX Model from OpenMMLab and Running Inference

Manual export

Run the following script directly:

import torch
from mmdet.apis import init_detector

config_file = './configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
checkpoint_file = 'yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth'
model = init_detector(config_file, checkpoint_file, device='cpu')  # or device='cuda:0'
torch.onnx.export(model, (torch.zeros(1, 3, 416, 416),), "out.onnx", opset_version=11)

The structure of the exported ONNX model is as follows:
(figure: exported ONNX graph)
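To sanity-check the export, the outputs can be inspected with ONNX Runtime. This is a minimal sketch, assuming onnxruntime is installed and out.onnx is the file produced above; for a 416×416 input each head should emit a raw feature map of shape (1, 255, H, W), where 255 = 3 anchors × (5 + 80):

import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession('out.onnx', providers=['CPUExecutionProvider'])
dummy = np.zeros((1, 3, 416, 416), dtype=np.float32)
outs = sess.run(None, {sess.get_inputs()[0].name: dummy})
for o in outs:
    print(o.shape)  # expected: (1, 255, 13, 13), (1, 255, 26, 26), (1, 255, 52, 52)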
The model has three outputs, one per detection-head level. To merge the detection results into a single output, modify the script as follows:

import torch
from itertools import repeat
from mmdet.apis import init_detector

config_file = './configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
checkpoint_file = 'yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth'


class YOLOV3(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.model = init_detector(config_file, checkpoint_file, device='cpu')
        self.class_num = 80
        # anchor sizes per level, from large stride (32) to small stride (8)
        self.base_sizes = [[(116, 90), (156, 198), (373, 326)],
                           [(30, 61), (62, 45), (59, 119)],
                           [(10, 13), (16, 30), (33, 23)]]
        self.stride = [32, 16, 8]
        self.strides = [tuple(repeat(x, 2)) for x in self.stride]
        self.centers = [(x[0] / 2., x[1] / 2.) for x in self.strides]
        self.base_anchors = self.gen_base_anchors()

    def gen_base_anchors(self):
        # build (x1, y1, x2, y2) base anchors centered in each grid cell
        multi_level_base_anchors = []
        for i, base_sizes_per_level in enumerate(self.base_sizes):
            x_center, y_center = self.centers[i]
            base_anchors = []
            for base_size in base_sizes_per_level:
                w, h = base_size
                base_anchors.append(torch.Tensor([x_center - 0.5 * w, y_center - 0.5 * h,
                                                  x_center + 0.5 * w, y_center + 0.5 * h]))
            multi_level_base_anchors.append(torch.stack(base_anchors, dim=0))
        return multi_level_base_anchors

    def _meshgrid(self, x, y):
        xx = x.repeat(y.shape[0])
        yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
        return xx, yy

    def grid_priors(self, featmap_sizes):
        # tile the base anchors over every cell of each feature map
        multi_level_anchors = []
        for i in range(len(featmap_sizes)):
            base_anchors = self.base_anchors[i]
            feat_h, feat_w = featmap_sizes[i]
            stride_w, stride_h = self.strides[i]
            shift_x = torch.arange(0, feat_w) * stride_w
            shift_y = torch.arange(0, feat_h) * stride_h
            shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
            shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
            anchors = base_anchors[None, :, :] + shifts[:, None, :]
            multi_level_anchors.append(anchors.view(-1, 4))
        return multi_level_anchors

    def decode(self, bboxes, pred_bboxes, stride):
        # YOLOv3 decoding: the center offset is scaled by the stride,
        # width/height scale the anchor by exp(tw), exp(th)
        xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (pred_bboxes[..., :2] - 0.5) * stride
        whs = (bboxes[..., 2:] - bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
        decoded_bboxes = torch.stack((xy_centers[..., 0] - whs[..., 0],
                                      xy_centers[..., 1] - whs[..., 1],
                                      xy_centers[..., 0] + whs[..., 0],
                                      xy_centers[..., 1] + whs[..., 1]), dim=-1)
        return decoded_bboxes

    def forward(self, x):
        x = self.model.backbone(x)
        x = self.model.neck(x)
        pred_maps = self.model.bbox_head(x)
        flatten_preds = []
        flatten_strides = []
        for pred, stride in zip(pred_maps[0], self.stride):
            pred = pred.permute(0, 2, 3, 1).reshape(1, -1, 5 + self.class_num)
            pred[..., :2] = pred[..., :2].sigmoid()
            flatten_preds.append(pred)
            flatten_strides.append(pred.new_tensor(stride).expand(pred.size(1)))
        flatten_preds = torch.cat(flatten_preds, dim=1)
        flatten_bbox_preds = flatten_preds[..., :4]
        flatten_preds[..., 4] = flatten_preds[..., 4].sigmoid()    # objectness
        flatten_preds[..., 5:] = flatten_preds[..., 5:].sigmoid()  # class scores
        featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps[0]]
        mlvl_anchors = self.grid_priors(featmap_sizes)
        flatten_anchors = torch.cat(mlvl_anchors)
        flatten_strides = torch.cat(flatten_strides)
        flatten_bboxes = self.decode(flatten_anchors, flatten_bbox_preds, flatten_strides.unsqueeze(-1))
        flatten_preds[..., :4] = flatten_bboxes
        return flatten_preds


model = YOLOV3().eval()
input = torch.zeros(1, 3, 416, 416, device='cpu')
torch.onnx.export(model, input, "out.onnx", opset_version=11)

The structure of the exported ONNX model is as follows:
(figure: exported ONNX graph with a single merged output)
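As a quick check of the merged export (a sketch under the same assumptions as above): a 416×416 input yields 3 × (13² + 26² + 52²) = 10647 anchors, each carrying 4 box coordinates, 1 objectness score and 80 class scores, so the single output should have shape (1, 10647, 85):

import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession('out.onnx', providers=['CPUExecutionProvider'])
dummy = np.zeros((1, 3, 416, 416), dtype=np.float32)
pred = sess.run(None, {sess.get_inputs()[0].name: dummy})[0]
print(pred.shape)  # expected: (1, 10647, 85)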
Inference with ONNX Runtime:

import cv2
import numpy as np
import onnxruntime

CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
           'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
           'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
           'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
           'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
           'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
           'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
           'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
           'hair drier', 'toothbrush']  # the 80 COCO classes

use_letterbox = True
input_shape = (416, 416)


class YOLOV3():
    def __init__(self, onnxpath):
        self.onnx_session = onnxruntime.InferenceSession(onnxpath, providers=['CPUExecutionProvider'])
        self.input_name = self.get_input_name()
        self.output_name = self.get_output_name()

    # -------------------------------------------------------
    #   get the input/output names
    # -------------------------------------------------------
    def get_input_name(self):
        input_name = []
        for node in self.onnx_session.get_inputs():
            input_name.append(node.name)
        return input_name

    def get_output_name(self):
        output_name = []
        for node in self.onnx_session.get_outputs():
            output_name.append(node.name)
        return output_name

    # -------------------------------------------------------
    #   feed the image tensor to every input
    # -------------------------------------------------------
    def get_input_feed(self, img_tensor):
        input_feed = {}
        for name in self.input_name:
            input_feed[name] = img_tensor
        return input_feed

    # -------------------------------------------------------
    #   1. read and resize the image with cv2
    #   2. BGR2RGB and HWC2CHW
    #   3. normalize
    #   4. add the batch dimension
    #   5. run the onnx_session
    # -------------------------------------------------------
    def inference(self, img):
        if use_letterbox:
            or_img = letterbox(img, input_shape)
        else:
            or_img = cv2.resize(img, input_shape)
        img = or_img[:, :, ::-1].transpose(2, 0, 1)  # BGR2RGB and HWC2CHW
        img = img.astype(dtype=np.float32)
        img[0, :] = (img[0, :] - 123.675) / 58.395
        img[1, :] = (img[1, :] - 116.28) / 57.12
        img[2, :] = (img[2, :] - 103.53) / 57.375
        img = np.expand_dims(img, axis=0)
        input_feed = self.get_input_feed(img)
        pred = self.onnx_session.run(None, input_feed)
        return pred, or_img


# dets:   array [x, 6], the 6 values are x1, y1, x2, y2, score, class
# thresh: IoU threshold
def nms(dets, thresh):
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    # -------------------------------------------------------
    #   compute the box areas
    #   sort by confidence, high to low
    # -------------------------------------------------------
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    scores = dets[:, 4]
    keep = []
    index = scores.argsort()[::-1]
    while index.size > 0:
        i = index[0]
        keep.append(i)
        # -------------------------------------------------------
        #   intersection area (zero when boxes do not overlap)
        # -------------------------------------------------------
        x11 = np.maximum(x1[i], x1[index[1:]])
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])
        w = np.maximum(0, x22 - x11 + 1)
        h = np.maximum(0, y22 - y11 + 1)
        overlaps = w * h
        # -------------------------------------------------------
        #   IoU against the remaining boxes: duplicates (high IoU)
        #   are dropped, boxes with IoU <= thresh are kept
        # -------------------------------------------------------
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
        idx = np.where(ious <= thresh)[0]
        index = index[idx + 1]
    return keep


def xywh2xyxy(x):
    # [x, y, w, h] to [x1, y1, x2, y2]
    y = np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y


def filter_box(org_box, conf_thres, iou_thres):  # filter out useless boxes
    # -------------------------------------------------------
    #   squeeze the batch dimension
    #   drop boxes whose objectness is below conf_thres
    # -------------------------------------------------------
    org_box = np.squeeze(org_box)
    conf = org_box[..., 4] > conf_thres
    box = org_box[conf == True]
    # -------------------------------------------------------
    #   take the class with the highest score via argmax
    # -------------------------------------------------------
    cls_cinf = box[..., 5:]
    cls = []
    for i in range(len(cls_cinf)):
        cls.append(int(np.argmax(cls_cinf[i])))
    all_cls = list(set(cls))
    # -------------------------------------------------------
    #   run NMS per class:
    #   1. write the class index into column 5
    #   2. the boxes are already x1y1x2y2 (decoded in the graph),
    #      so no xywh2xyxy conversion is needed here
    #   3. nms returns the indices of the surviving boxes
    #   4. gather the surviving boxes by index
    # -------------------------------------------------------
    output = []
    for i in range(len(all_cls)):
        curr_cls = all_cls[i]
        curr_cls_box = []
        curr_out_box = []
        for j in range(len(cls)):
            if cls[j] == curr_cls:
                box[j][4] *= cls_cinf[j][curr_cls]
                box[j][5] = curr_cls
                curr_cls_box.append(box[j][:6])
        curr_cls_box = np.array(curr_cls_box)
        curr_out_box = nms(curr_cls_box, iou_thres)
        for k in curr_out_box:
            output.append(curr_cls_box[k])
    output = np.array(output)
    return output


def letterbox(im, new_shape=(416, 416), color=(114, 114, 114)):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2  # wh padding
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im


def scale_boxes(input_shape, boxes, shape):
    # Rescale boxes (xyxy) from input_shape to shape
    gain = min(input_shape[0] / shape[0], input_shape[1] / shape[1])  # gain = old / new
    pad = (input_shape[1] - shape[1] * gain) / 2, (input_shape[0] - shape[0] * gain) / 2  # wh padding
    boxes[..., [0, 2]] -= pad[0]  # x padding
    boxes[..., [1, 3]] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
    boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
    return boxes


def draw(image, box_data):
    box_data = scale_boxes(input_shape, box_data, image.shape)
    boxes = box_data[..., :4].astype(np.int32)
    scores = box_data[..., 4]
    classes = box_data[..., 5].astype(np.int32)
    for box, score, cl in zip(boxes, scores, classes):
        top, left, right, bottom = box
        print('class: {}, score: {}, coordinate: [{}, {}, {}, {}]'.format(CLASSES[cl], score, top, left, right, bottom))
        cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 1)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
                    (top, left), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)


if __name__ == "__main__":
    model = YOLOV3('out.onnx')
    img = cv2.imread('bus.jpg')
    output, _ = model.inference(img)
    outbox = filter_box(output, 0.5, 0.5)
    draw(img, outbox)
    cv2.imwrite('res.jpg', img)
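To see the NMS step in isolation, here is a toy run with hypothetical boxes (assuming the nms function above is in scope): the second box overlaps the first with IoU ≈ 0.83, so it is suppressed, while the distant third box survives.

import numpy as np

dets = np.array([
    [10., 10., 50., 50., 0.9, 0.],     # highest score, kept
    [12., 12., 52., 52., 0.8, 0.],     # IoU with the first box > 0.5, suppressed
    [100., 100., 140., 140., 0.7, 0.]  # no overlap, kept
])
print(nms(dets, 0.5))  # expected: [0, 2]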

Export with MMDeploy

With MMDeploy installed, the ONNX model can be exported with the following script.

from mmdeploy.apis import torch2onnx
from mmdeploy.backend.sdk.export_info import export2SDK

img = 'demo.JPEG'
work_dir = './work_dir/onnx/yolov3'
save_file = './end2end.onnx'
deploy_cfg = 'mmdeploy/configs/mmdet/detection/detection_onnxruntime_dynamic.py'
model_cfg = 'mmdetection/configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
model_checkpoint = 'checkpoints/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth'
device = 'cpu'

# 1. convert model to onnx
torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg, model_checkpoint, device)

# 2. extract pipeline info for sdk use (dump-info)
export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint, device=device)

The structure of the ONNX model is as follows:
(figure: end2end.onnx graph)
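Unlike the manual export, the end2end model embeds score filtering and NMS in the graph. Its interface can be inspected with a small sketch (assuming onnxruntime is installed and the path matches the export above); the detection graph typically exposes a dets output of shape (batch, num_dets, 5) holding x1, y1, x2, y2, score, plus a labels output of shape (batch, num_dets):

import onnxruntime

sess = onnxruntime.InferenceSession('./work_dir/onnx/yolov3/end2end.onnx',
                                    providers=['CPUExecutionProvider'])
for node in sess.get_inputs():
    print('input:', node.name, node.shape)
for node in sess.get_outputs():
    print('output:', node.name, node.shape)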

Inference with ONNX Runtime:

import cv2
import numpy as np
import onnxruntime

CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
           'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
           'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
           'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
           'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
           'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
           'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
           'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
           'hair drier', 'toothbrush']  # the 80 COCO classes

use_letterbox = True
input_shape = (416, 416)


class YOLOV3():
    def __init__(self, onnxpath):
        self.onnx_session = onnxruntime.InferenceSession(onnxpath, providers=['CPUExecutionProvider'])
        self.input_name = self.get_input_name()
        self.output_name = self.get_output_name()

    # -------------------------------------------------------
    #   get the input/output names
    # -------------------------------------------------------
    def get_input_name(self):
        input_name = []
        for node in self.onnx_session.get_inputs():
            input_name.append(node.name)
        return input_name

    def get_output_name(self):
        output_name = []
        for node in self.onnx_session.get_outputs():
            output_name.append(node.name)
        return output_name

    # -------------------------------------------------------
    #   feed the image tensor to every input
    # -------------------------------------------------------
    def get_input_feed(self, img_tensor):
        input_feed = {}
        for name in self.input_name:
            input_feed[name] = img_tensor
        return input_feed

    # -------------------------------------------------------
    #   1. read and resize the image with cv2
    #   2. BGR2RGB and HWC2CHW
    #   3. normalize
    #   4. add the batch dimension
    #   5. run the onnx_session
    # -------------------------------------------------------
    def inference(self, img):
        if use_letterbox:
            or_img = letterbox(img, input_shape)
        else:
            or_img = cv2.resize(img, input_shape)
        img = or_img[:, :, ::-1].transpose(2, 0, 1)  # BGR2RGB and HWC2CHW
        img = img.astype(dtype=np.float32)
        img[0, :] = (img[0, :] - 123.675) / 58.395
        img[1, :] = (img[1, :] - 116.28) / 57.12
        img[2, :] = (img[2, :] - 103.53) / 57.375
        img = np.expand_dims(img, axis=0)
        input_feed = self.get_input_feed(img)
        pred = self.onnx_session.run(None, input_feed)
        return pred, or_img


def filter_box(org_box, conf_thres):  # drop boxes whose score is below conf_thres
    flag = org_box[0][..., 4] > conf_thres
    box = org_box[0][flag == True]
    cls = org_box[1][flag == True].reshape(-1, 1)
    output = np.concatenate((box, cls), axis=1)
    return output


def letterbox(im, new_shape=(416, 416), color=(114, 114, 114)):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2  # wh padding
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im


def scale_boxes(input_shape, boxes, shape):
    # Rescale boxes (xyxy) from input_shape to shape
    gain = min(input_shape[0] / shape[0], input_shape[1] / shape[1])  # gain = old / new
    pad = (input_shape[1] - shape[1] * gain) / 2, (input_shape[0] - shape[0] * gain) / 2  # wh padding
    boxes[..., [0, 2]] -= pad[0]  # x padding
    boxes[..., [1, 3]] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
    boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
    return boxes


def draw(image, box_data):
    box_data = scale_boxes(input_shape, box_data, image.shape)
    boxes = box_data[..., :4].astype(np.int32)
    scores = box_data[..., 4]
    classes = box_data[..., 5].astype(np.int32)
    for box, score, cl in zip(boxes, scores, classes):
        top, left, right, bottom = box
        print('class: {}, score: {}, coordinate: [{}, {}, {}, {}]'.format(CLASSES[cl], score, top, left, right, bottom))
        cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 1)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
                    (top, left), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)


if __name__ == "__main__":
    model = YOLOV3('../work_dir/onnx/yolov3/end2end.onnx')
    img = cv2.imread('bus.jpg')
    output, _ = model.inference(img)
    outbox = filter_box(output, 0.5)  # NMS already runs inside the graph, so no IoU threshold here
    draw(img, outbox)
    cv2.imwrite('res.jpg', img)

Inference with MMDeploy:

from mmdeploy.apis import inference_model

model_cfg = 'mmdetection/configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
deploy_cfg = 'mmdeploy/configs/mmdet/detection/detection_onnxruntime_dynamic.py'
img = 'mmdetection/demo/demo.jpg'
backend_files = ['work_dir/onnx/yolov3/end2end.onnx']
device = 'cpu'

result = inference_model(model_cfg, deploy_cfg, backend_files, img, device)
print(result)
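inference_model rebuilds the deployment pipeline from the model and deploy configs and runs the ONNX backend end to end; the printed result should be mmdetection's usual per-image detection result, which makes this a convenient way to spot-check the exported model against the original PyTorch checkpoint.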

Alternatively, with the SDK runtime:

from mmdeploy_runtime import Detector
import cv2

# read the image
img = cv2.imread('mmdetection/demo/demo.jpg')

# create the detector
detector = Detector(model_path='work_dir/onnx/yolov3', device_name='cpu')
# detector = Detector(model_path='work_dir/onnx/yolox', device_name='cpu')
# detector = Detector(model_path='work_dir/trt/yolox', device_name='cuda')

# run inference
bboxes, labels, _ = detector(img)

# filter the results by score threshold and draw them on the image
indices = [i for i in range(len(bboxes))]
for index, bbox, label_id in zip(indices, bboxes, labels):
    [left, top, right, bottom], score = bbox[0:4].astype(int), bbox[4]
    if score < 0.3:
        continue
    cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0))

cv2.imwrite('output_detection.png', img)
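Note that model_path points to the work_dir directory rather than to end2end.onnx itself: the SDK Detector assembles its pre- and post-processing pipeline from the deploy.json/pipeline.json files that export2SDK dumped alongside the ONNX file, so the export2SDK step above is a prerequisite for this API.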
