Object Tracking Algorithm (ByteTrack): TensorRT Deployment Tutorial

1. Set up the local Python environment

conda create -n bytetrace_env python=3.8
conda activate bytetrace_env
conda install pytorch torchvision cudatoolkit=10.1 -c pytorch

Check that the GPU is available; if it is not, the later steps will not work:

import torch
print(torch.cuda.is_available())  # should print True; if False, check the CUDA/driver install

Install ByteTrack:

git clone https://github.com/ifzhang/ByteTrack.git
cd ByteTrack
pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
python setup.py develop

If these commands finish without errors, the installation succeeded.
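To double-check the editable install (the setup.py develop step should have registered ByteTrack's yolox package), try importing it:

python -c "import yolox; print(yolox.__file__)"

If the import fails, re-run the install steps above from the ByteTrack root.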
Install pycocotools:

pip install cython
pip install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'

Or (on Linux):

pip install git+https://gitee.com/pursuit_zhangyu/cocoapi.git#subdirectory=PythonAPI

On Windows:

pip install pycocotools-windows
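Whichever variant you used, a quick import confirms the compiled extension works:

python -c "from pycocotools.coco import COCO; print('pycocotools OK')"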

2. Set up the TensorRT environment

Download the TensorRT package:
TensorRT-8.4.3.1.Windows10.x86_64.cuda-10.2.cudnn8.4.zip
Copy all of the DLLs to
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin
and set the environment variables accordingly.
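To confirm Windows now resolves the TensorRT runtime from PATH, open a fresh cmd window and check for nvinfer.dll (TensorRT's core runtime DLL):

where nvinfer.dll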
The Python version in the virtual environment is 3.8, so install the matching cp38 wheel:

pip install tensorrt-8.4.3.1-cp38-none-win_amd64.whl
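A minimal sanity check of the Python binding (the version should match the wheel above):

import tensorrt as trt
print(trt.__version__)  # expect 8.4.3.1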

3. Convert the model

Download bytetrack_s_mot17.pth.tar from https://pan.baidu.com/s/1PiP1kQfgxAIrnGUbFP6Wfg (extraction code: qflm), create a pretrained directory to store it, and run:

python tools/trt.py -f exps/example/mot/yolox_s_mix_det.py -c pretrained/bytetrack_s_mot17.pth.tar

Finally, the TensorRT engine and the pth model are generated under D:\git_clone\ByteTrack-main\YOLOX_outputs\yolox_s_mix_det.
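Internally, tools/trt.py performs a torch2trt conversion. The sketch below shows that flow under stated assumptions: a stand-in module replaces the YOLOX-s model that the real script loads from the checkpoint (so the snippet runs on its own), and fp16_mode=True is assumed to mirror the script:

import torch
import torch.nn as nn
from torch2trt import torch2trt  # pip install torch2trt

# stand-in for the YOLOX-s model that trt.py loads from the checkpoint
model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU()).eval().cuda()
x = torch.ones(1, 3, 608, 1088).cuda()             # dummy input, INPUT_H x INPUT_W
model_trt = torch2trt(model, [x], fp16_mode=True)  # builds a TensorRT engine
torch.save(model_trt.state_dict(), "model_trt.pth")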

4. Build the Eigen library with CMake and VS2015

Download Eigen 3.3.9 from https://pan.baidu.com/s/15kEfCxpy-T7tz60msxxExg (extraction code: ueq4), then configure and generate the project with CMake and build the INSTALL target with VS2015, as sketched below.
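A possible command-line equivalent of those CMake GUI steps (paths chosen to match the Eigen include directory referenced later in CMakeLists.txt):

cd D:\VS2015_CUDA\TensorRT\eigen-3.3.9
mkdir build
cd build
cmake .. -G "Visual Studio 14 2015 Win64" -DCMAKE_INSTALL_PREFIX=install
cmake --build . --config Release --target INSTALL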

5. Download OpenCV 4.5.0

https://nchc.dl.sourceforge.net/project/opencvlibrary/4.5.0/opencv-4.5.0-vc14_vc15.exe?viasf=1
Install it to D:\opencv450.

6. Generate the bytetrack project with CMake and build it with VS2015

Modify CMakeLists.txt:

cmake_minimum_required(VERSION 2.6)
project(bytetrack)

add_definitions(-std=c++11)
option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Debug)

find_package(CUDA REQUIRED)

include_directories(${PROJECT_SOURCE_DIR}/include)
include_directories(D:/VS2015_CUDA/TensorRT/eigen-3.3.9/build/install/include/eigen3)
link_directories(${PROJECT_SOURCE_DIR}/include)
# include and link dirs of cuda and tensorrt, you need adapt them if yours are different
# cuda
include_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/include")
link_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/lib/x64")
# cudnn
include_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/include")
link_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/lib/x64")
# tensorrt
include_directories(D:/VS2015_CUDA/TensorRT/TensorRT-8.4.3.1/include)
link_directories(D:/VS2015_CUDA/TensorRT/TensorRT-8.4.3.1/lib)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -D_MWAITXINTRIN_H_INCLUDED")

# opencv (OpenCV_LIBS points at the lib directory; the .lib files themselves
# are added manually in the VS project, see below)
set(OpenCV_INCLUDE_DIRS D:/opencv450/build/include)
set(OpenCV_LIBS D:/opencv450/build/x64/vc14/lib)
include_directories(${OpenCV_INCLUDE_DIRS})

file(GLOB My_Source_Files ${PROJECT_SOURCE_DIR}/src/*.cpp)
add_executable(bytetrack ${My_Source_Files})

target_link_libraries(bytetrack nvinfer)
target_link_libraries(bytetrack cudart)
target_link_libraries(bytetrack ${OpenCV_LIBS})

add_definitions(-O2 -pthread)
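With the file saved, a possible configure step from the project's cpp directory (directory and generator assumed; the CMake GUI works equally well):

cd D:\VS2015_CUDA\TensorRT\cpp
mkdir build
cd build
cmake .. -G "Visual Studio 14 2015 Win64"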

After configuring and generating with CMake, open the generated VS2015 solution and adjust the project settings.
OpenCV can be added to the project manually: for the prebuilt 4.5.0 package this typically means adding opencv_world450.lib (opencv_world450d.lib for Debug builds) under Linker → Input → Additional Dependencies.
Modify bytetrack.cpp:

int img_w = cap.get(cv::CAP_PROP_FRAME_WIDTH);
int img_h = cap.get(cv::CAP_PROP_FRAME_HEIGHT);

There is a bug in the original source here: the frame height is read with the wrong property, so img_h is incorrect and the demo.mp4 written later is broken. The two lines above are the corrected version.
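The wrong height matters because img_w and img_h size the output video writer; a sketch following the upstream demo's writer setup:

int fps = cap.get(cv::CAP_PROP_FPS);
// demo.mp4 inherits this size, so a bad img_h corrupts the whole output file
cv::VideoWriter writer("demo.mp4", cv::VideoWriter::fourcc('m', 'p', '4', 'v'),
        fps, cv::Size(img_w, img_h));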
Build the project to produce the executable.

7. Inspect the exe's dependencies with Dependency Walker

Open bytetrack.exe in Dependency Walker and confirm that the TensorRT, CUDA, and OpenCV DLLs all resolve; copy any missing DLLs next to the exe or onto the PATH. Then run:

bytetrack.exe D:\VS2015_CUDA\TensorRT\cpp\model_trt.engine -i D:\VS2015_CUDA\TensorRT\cpp\palace.mp4

The tracking result is written to demo.mp4.

8. Windows source code

bytetrack.cpp

#include <fstream>
#include <iostream>
#include <sstream>
#include <numeric>
#include <chrono>
#include <vector>
#include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "cuda_runtime_api.h"
#include "logging.h"
#include "BYTETracker.h"#define CHECK(status) \do\{\auto ret = (status);\if (ret != 0)\{\cerr << "Cuda failure: " << ret << endl;\abort();\}\} while (0)#define DEVICE 0  // GPU id
#define NMS_THRESH 0.7
#define BBOX_CONF_THRESH 0.1

using namespace nvinfer1;
// std and cv names are used unqualified throughout; in the upstream project
// these usings come in via the tracker headers, stated here for completeness
using namespace std;
using namespace cv;

// stuff we know about the network and the input/output blobs
static const int INPUT_W = 1088;
static const int INPUT_H = 608;
const char* INPUT_BLOB_NAME = "input_0";
const char* OUTPUT_BLOB_NAME = "output_0";
static Logger gLogger;

// Letterbox resize: scale into 1088x608 keeping aspect ratio, pad with gray (114)
Mat static_resize(Mat& img)
{
    float r = min(INPUT_W / (img.cols * 1.0), INPUT_H / (img.rows * 1.0));
    // r = std::min(r, 1.0f);
    int unpad_w = r * img.cols;
    int unpad_h = r * img.rows;
    Mat re(unpad_h, unpad_w, CV_8UC3);
    resize(img, re, re.size());
    Mat out(INPUT_H, INPUT_W, CV_8UC3, Scalar(114, 114, 114));
    re.copyTo(out(Rect(0, 0, re.cols, re.rows)));
    return out;
}

struct GridAndStride
{
    int grid0;
    int grid1;
    int stride;
};

static void generate_grids_and_stride(const int target_w, const int target_h, vector<int>& strides, vector<GridAndStride>& grid_strides)
{
    for (auto stride : strides)
    {
        GridAndStride GS;
        int num_grid_w = target_w / stride;
        int num_grid_h = target_h / stride;
        for (int g1 = 0; g1 < num_grid_h; g1++)
        {
            for (int g0 = 0; g0 < num_grid_w; g0++)
            {
                GS.grid0 = g0;
                GS.grid1 = g1;
                GS.stride = stride;
                grid_strides.push_back(GS);
            }
        }
    }
}

static inline float intersection_area(const Object& a, const Object& b)
{
    Rect_<float> inter = a.rect & b.rect;
    return inter.area();
}

static void qsort_descent_inplace(vector<Object>& faceobjects, int left, int right)
{
    int i = left;
    int j = right;
    float p = faceobjects[(left + right) / 2].prob;

    while (i <= j)
    {
        while (faceobjects[i].prob > p)
            i++;

        while (faceobjects[j].prob < p)
            j--;

        if (i <= j)
        {
            // swap
            swap(faceobjects[i], faceobjects[j]);

            i++;
            j--;
        }
    }

    #pragma omp parallel sections
    {
        #pragma omp section
        {
            if (left < j) qsort_descent_inplace(faceobjects, left, j);
        }
        #pragma omp section
        {
            if (i < right) qsort_descent_inplace(faceobjects, i, right);
        }
    }
}

static void qsort_descent_inplace(vector<Object>& objects)
{
    if (objects.empty())
        return;

    qsort_descent_inplace(objects, 0, objects.size() - 1);
}

static void nms_sorted_bboxes(const vector<Object>& faceobjects, vector<int>& picked, float nms_threshold)
{
    picked.clear();

    const int n = faceobjects.size();

    vector<float> areas(n);
    for (int i = 0; i < n; i++)
    {
        areas[i] = faceobjects[i].rect.area();
    }

    for (int i = 0; i < n; i++)
    {
        const Object& a = faceobjects[i];

        int keep = 1;
        for (int j = 0; j < (int)picked.size(); j++)
        {
            const Object& b = faceobjects[picked[j]];

            // intersection over union
            float inter_area = intersection_area(a, b);
            float union_area = areas[i] + areas[picked[j]] - inter_area;
            // float IoU = inter_area / union_area
            if (inter_area / union_area > nms_threshold)
                keep = 0;
        }

        if (keep)
            picked.push_back(i);
    }
}

static void generate_yolox_proposals(vector<GridAndStride> grid_strides, float* feat_blob, float prob_threshold, vector<Object>& objects)
{
    const int num_class = 1;
    const int num_anchors = grid_strides.size();

    for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++)
    {
        const int grid0 = grid_strides[anchor_idx].grid0;
        const int grid1 = grid_strides[anchor_idx].grid1;
        const int stride = grid_strides[anchor_idx].stride;

        const int basic_pos = anchor_idx * (num_class + 5);

        // yolox/models/yolo_head.py decode logic
        float x_center = (feat_blob[basic_pos + 0] + grid0) * stride;
        float y_center = (feat_blob[basic_pos + 1] + grid1) * stride;
        float w = exp(feat_blob[basic_pos + 2]) * stride;
        float h = exp(feat_blob[basic_pos + 3]) * stride;
        float x0 = x_center - w * 0.5f;
        float y0 = y_center - h * 0.5f;

        float box_objectness = feat_blob[basic_pos + 4];
        for (int class_idx = 0; class_idx < num_class; class_idx++)
        {
            float box_cls_score = feat_blob[basic_pos + 5 + class_idx];
            float box_prob = box_objectness * box_cls_score;
            if (box_prob > prob_threshold)
            {
                Object obj;
                obj.rect.x = x0;
                obj.rect.y = y0;
                obj.rect.width = w;
                obj.rect.height = h;
                obj.label = class_idx;
                obj.prob = box_prob;

                objects.push_back(obj);
            }
        } // class loop
    } // point anchor loop
}

// HWC uint8 BGR -> normalized CHW float blob (ImageNet mean/std)
float* blobFromImage(Mat& img)
{
    cvtColor(img, img, COLOR_BGR2RGB);

    float* blob = new float[img.total() * 3];
    int channels = 3;
    int img_h = img.rows;
    int img_w = img.cols;
    vector<float> mean = {0.485f, 0.456f, 0.406f};
    vector<float> std = {0.229f, 0.224f, 0.225f};
    for (size_t c = 0; c < channels; c++)
    {
        for (size_t h = 0; h < img_h; h++)
        {
            for (size_t w = 0; w < img_w; w++)
            {
                blob[c * img_w * img_h + h * img_w + w] =
                    (((float)img.at<Vec3b>(h, w)[c]) / 255.0f - mean[c]) / std[c];
            }
        }
    }
    return blob;
}

static void decode_outputs(float* prob, vector<Object>& objects, float scale, const int img_w, const int img_h)
{
    vector<Object> proposals;
    vector<int> strides = {8, 16, 32};
    vector<GridAndStride> grid_strides;
    generate_grids_and_stride(INPUT_W, INPUT_H, strides, grid_strides);

    generate_yolox_proposals(grid_strides, prob, BBOX_CONF_THRESH, proposals);
    //std::cout << "num of boxes before nms: " << proposals.size() << std::endl;

    qsort_descent_inplace(proposals);

    vector<int> picked;
    nms_sorted_bboxes(proposals, picked, NMS_THRESH);

    int count = picked.size();
    //std::cout << "num of boxes: " << count << std::endl;

    objects.resize(count);
    for (int i = 0; i < count; i++)
    {
        objects[i] = proposals[picked[i]];

        // adjust offset to original unpadded
        float x0 = (objects[i].rect.x) / scale;
        float y0 = (objects[i].rect.y) / scale;
        float x1 = (objects[i].rect.x + objects[i].rect.width) / scale;
        float y1 = (objects[i].rect.y + objects[i].rect.height) / scale;

        // clip
        // x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
        // y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
        // x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
        // y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);

        objects[i].rect.x = x0;
        objects[i].rect.y = y0;
        objects[i].rect.width = x1 - x0;
        objects[i].rect.height = y1 - y0;
    }
}

const float color_list[80][3] =
{
    {0.000, 0.447, 0.741}, {0.850, 0.325, 0.098}, {0.929, 0.694, 0.125}, {0.494, 0.184, 0.556},
    {0.466, 0.674, 0.188}, {0.301, 0.745, 0.933}, {0.635, 0.078, 0.184}, {0.300, 0.300, 0.300},
    {0.600, 0.600, 0.600}, {1.000, 0.000, 0.000}, {1.000, 0.500, 0.000}, {0.749, 0.749, 0.000},
    {0.000, 1.000, 0.000}, {0.000, 0.000, 1.000}, {0.667, 0.000, 1.000}, {0.333, 0.333, 0.000},
    {0.333, 0.667, 0.000}, {0.333, 1.000, 0.000}, {0.667, 0.333, 0.000}, {0.667, 0.667, 0.000},
    {0.667, 1.000, 0.000}, {1.000, 0.333, 0.000}, {1.000, 0.667, 0.000}, {1.000, 1.000, 0.000},
    {0.000, 0.333, 0.500}, {0.000, 0.667, 0.500}, {0.000, 1.000, 0.500}, {0.333, 0.000, 0.500},
    {0.333, 0.333, 0.500}, {0.333, 0.667, 0.500}, {0.333, 1.000, 0.500}, {0.667, 0.000, 0.500},
    {0.667, 0.333, 0.500}, {0.667, 0.667, 0.500}, {0.667, 1.000, 0.500}, {1.000, 0.000, 0.500},
    {1.000, 0.333, 0.500}, {1.000, 0.667, 0.500}, {1.000, 1.000, 0.500}, {0.000, 0.333, 1.000},
    {0.000, 0.667, 1.000}, {0.000, 1.000, 1.000}, {0.333, 0.000, 1.000}, {0.333, 0.333, 1.000},
    {0.333, 0.667, 1.000}, {0.333, 1.000, 1.000}, {0.667, 0.000, 1.000}, {0.667, 0.333, 1.000},
    {0.667, 0.667, 1.000}, {0.667, 1.000, 1.000}, {1.000, 0.000, 1.000}, {1.000, 0.333, 1.000},
    {1.000, 0.667, 1.000}, {0.333, 0.000, 0.000}, {0.500, 0.000, 0.000}, {0.667, 0.000, 0.000},
    {0.833, 0.000, 0.000}, {1.000, 0.000, 0.000}, {0.000, 0.167, 0.000}, {0.000, 0.333, 0.000},
    {0.000, 0.500, 0.000}, {0.000, 0.667, 0.000}, {0.000, 0.833, 0.000}, {0.000, 1.000, 0.000},
    {0.000, 0.000, 0.167}, {0.000, 0.000, 0.333}, {0.000, 0.000, 0.500}, {0.000, 0.000, 0.667},
    {0.000, 0.000, 0.833}, {0.000, 0.000, 1.000}, {0.000, 0.000, 0.000}, {0.143, 0.143, 0.143},
    {0.286, 0.286, 0.286}, {0.429, 0.429, 0.429}, {0.571, 0.571, 0.571}, {0.714, 0.714, 0.714},
    {0.857, 0.857, 0.857}, {0.000, 0.447, 0.741}, {0.314, 0.717, 0.741}, {0.50, 0.5, 0}
};

void doInference(IExecutionContext& context, float* input, float* output, const int output_size, Size input_shape)
{
    const ICudaEngine& engine = context.getEngine();

    // Pointers to input and output device buffers to pass to engine.
    // Engine requires exactly IEngine::getNbBindings() number of buffers.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];

    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
    assert(engine.getBindingDataType(inputIndex) == nvinfer1::DataType::kFLOAT);
    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
    assert(engine.getBindingDataType(outputIndex) == nvinfer1::DataType::kFLOAT);
    int mBatchSize = engine.getMaxBatchSize();

    // Create GPU buffers on device
    CHECK(cudaMalloc(&buffers[inputIndex], 3 * input_shape.height * input_shape.width * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], output_size * sizeof(float)));

    // Create stream
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, 3 * input_shape.height * input_shape.width * sizeof(float), cudaMemcpyHostToDevice, stream));
    context.enqueue(1, buffers, stream, nullptr);
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], output_size * sizeof(float), cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);

    // Release stream and buffers
    cudaStreamDestroy(stream);
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}

int main(int argc, char** argv)
{
    cudaSetDevice(DEVICE);

    // create a model using the API directly and serialize it to a stream
    char* trtModelStream{nullptr};
    size_t size{0};

    if (argc == 4 && string(argv[2]) == "-i")
    {
        // read the serialized engine file into memory
        const string engine_file_path{argv[1]};
        ifstream file(engine_file_path, ios::binary);
        if (file.good())
        {
            file.seekg(0, file.end);
            size = file.tellg();
            file.seekg(0, file.beg);
            trtModelStream = new char[size];
            assert(trtModelStream);
            file.read(trtModelStream, size);
            file.close();
        }
    }
    else
    {
        cerr << "arguments not right!" << endl;
        return -1;
    }
    // ... the rest of main is truncated in the original post
}
