Yahboom microROS Car on Native Ubuntu, Part 26: Gesture Control of Basic Car Motion

Background

Finger detection: Yahboom microROS Car on Native Ubuntu, Part 4: Hand Detection (CSDN blog)

Program description

Once the function is enabled, the camera captures images and recognized hand gestures control how the car moves. The gesture-to-motion mapping is:

Gesture "5" (open palm): the car moves forward
Fist: the car moves backward
Gesture "1" (index finger up): the car turns left
Gesture "2" (index and middle fingers up): the car turns right

Run:

Start the car agent and the image agent first.
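In earlier parts of this series both agents run through micro-ROS; a typical invocation looks like the following (a sketch only — the agent image, transport, and port must match your own setup):

docker run -it --rm -v /dev:/dev -v /dev/shm:/dev/shm --privileged --net=host \
    microros/micro-ros-agent:humble udp4 --port 8090 -v4

With the agents up and the ESP32 publishing images, launch the gesture-control node: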

ros2 run yahboom_esp32ai_car HandCtrl

Log output (each array is [thumb, index, middle, ring, pinky]; 1 means the finger is extended):

fingers:  [0, 1, 0, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 0, 0, 0]
fingers:  [0, 1, 0, 0, 0]
fingers:  [0, 1, 0, 0, 0]
fingers:  [0, 1, 0, 0, 0]
fingers:  [0, 1, 0, 0, 0]
fingers:  [0, 1, 0, 0, 0]
fingers:  [0, 1, 0, 0, 0]
fingers:  [0, 1, 0, 0, 0]
fingers:  [0, 1, 0, 0, 0]
fingers:  [1, 0, 0, 0, 1]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 1, 1, 0, 0]
fingers:  [0, 0, 0, 0, 0]
fingers:  [0, 0, 0, 0, 0]
fingers:  [1, 0, 0, 0, 1]
fingers:  [1, 0, 0, 0, 0]
fingers:  [1, 0, 0, 0, 0]
fingers:  [1, 0, 0, 0, 0]
fingers:  [1, 1, 0, 0, 0]
fingers:  [1, 1, 0, 0, 0]
fingers:  [1, 0, 0, 0, 0]
fingers:  [1, 0, 0, 0, 0]
fingers:  [1, 1, 0, 0, 0]
fingers:  [1, 0, 0, 0, 0]

Create a new file media_library.py under the src/yahboom_esp32ai_car/yahboom_esp32ai_car/ directory:

#!/usr/bin/env python3
# encoding: utf-8
import base64
import math
import rclpy
from rclpy.node import Node
import cv2 as cv
import mediapipe as mp
from time import sleep
from std_msgs.msg import Int32, Bool, UInt16
from yahboomcar_msgs.msg import *
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image

def get_dist(point1, point2):
    '''Distance between two points: sqrt((x1 - x2)**2 + (y1 - y2)**2)'''
    x1, y1 = point1
    x2, y2 = point2
    return abs(math.sqrt(math.pow(abs(y1 - y2), 2) + math.pow(abs(x1 - x2), 2)))

def calc_angle(pt1, pt2, pt3):
    '''Angle at the middle point pt2, by the law of cosines: cos(B) = (a^2 + b^2 - c^2) / (2ab)'''
    a = get_dist(pt1, pt2)
    b = get_dist(pt2, pt3)
    c = get_dist(pt1, pt3)
    try:
        radian = math.acos((math.pow(a, 2) + math.pow(b, 2) - math.pow(c, 2)) / (2 * a * b))
        angle = radian / math.pi * 180
    except:
        angle = 0
    return angle

class HandDetector:
    def __init__(self, mode=False, maxHands=1, detectorCon=0.5, trackCon=0.5):
        self.mpHand = mp.solutions.hands
        self.draw = False
        self.mpDraw = mp.solutions.drawing_utils
        self.hands = self.mpHand.Hands(
            static_image_mode=mode,
            max_num_hands=maxHands,
            min_detection_confidence=detectorCon,
            min_tracking_confidence=trackCon)

    def findHands(self, frame):
        lmList = []
        self.cxList = []
        self.cyList = []
        bbox = 0, 0, 0, 0
        img_RGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        self.results = self.hands.process(img_RGB)
        if self.results.multi_hand_landmarks:
            for i in range(len(self.results.multi_hand_landmarks)):
                if not self.draw:
                    self.mpDraw.draw_landmarks(frame, self.results.multi_hand_landmarks[i], self.mpHand.HAND_CONNECTIONS)
                for id, lm in enumerate(self.results.multi_hand_landmarks[i].landmark):
                    h, w, c = frame.shape
                    cx, cy = int(lm.x * w), int(lm.y * h)
                    lmList.append([id, cx, cy])
                    self.cxList.append(cx)
                    self.cyList.append(cy)
        if len(self.cxList) != 0 and len(self.cyList) != 0:
            xmin, xmax = min(self.cxList), max(self.cxList)
            ymin, ymax = min(self.cyList), max(self.cyList)
            bbox = xmin, ymin, xmax, ymax
            if self.draw:
                cv.rectangle(frame, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20), (0, 255, 0), 2)
        return frame, lmList, bbox

    def fingersUp(self, lmList):
        '''Return [thumb, index, middle, ring, pinky]; 1 = finger extended.'''
        fingers = []
        point1 = lmList[4][1:3]
        point2 = lmList[3][1:3]
        point3 = lmList[2][1:3]
        point4 = lmList[1][1:3]
        # Thumb: extended if both of its joint angles are nearly straight
        if (abs(calc_angle(point1, point2, point3)) > 150.0) and (abs(calc_angle(point2, point3, point4)) > 150.0):
            fingers.append(1)
        else:
            fingers.append(0)
        # The other four fingers: extended if the tip sits above the middle joint
        tipIds = [4, 8, 12, 16, 20]
        for id in range(1, 5):
            if lmList[tipIds[id]][2] < lmList[tipIds[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        return fingers

    def ThumbTOforefinger(self, lmList):
        '''Angle between the thumb tip and the index fingertip, with the wrist as vertex.'''
        point1 = lmList[4][1:3]
        point2 = lmList[0][1:3]
        point3 = lmList[8][1:3]
        return abs(calc_angle(point1, point2, point3))

    def get_gesture(self, lmList):
        gesture = ""
        fingers = self.fingersUp(lmList)
        if fingers[2] == fingers[3] == fingers[4] == 1:
            if self.ThumbTOforefinger(lmList) < 10:
                gesture = "OK"
        if fingers[1] == fingers[2] == 1 and sum(fingers) == 2:
            gesture = "Yes"
        try:
            if self.cyList[4] == max(self.cyList):
                gesture = "Thumb_down"
        except Exception as e:
            print("e: ", e)
        return gesture

class PoseDetector:
    def __init__(self, mode=False, smooth=True, detectionCon=0.5, trackCon=0.5):
        self.mpPose = mp.solutions.pose
        self.mpDraw = mp.solutions.drawing_utils
        self.pose = self.mpPose.Pose(
            static_image_mode=mode,
            smooth_landmarks=smooth,
            min_detection_confidence=detectionCon,
            min_tracking_confidence=trackCon)
        self.lmDrawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 0, 255), thickness=-1, circle_radius=6)
        self.drawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2)

    def pubPosePoint(self, frame, draw=True):
        pointArray = []
        img_RGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        self.results = self.pose.process(img_RGB)
        if self.results.pose_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(frame, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS, self.lmDrawSpec, self.drawSpec)
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                h, w, c = frame.shape
                pointArray.append([id, lm.x * w, lm.y * h, lm.z])
        return frame, pointArray

class Holistic:
    def __init__(self, staticMode=False, landmarks=True, detectionCon=0.5, trackingCon=0.5):
        self.mpHolistic = mp.solutions.holistic
        self.mpFaceMesh = mp.solutions.face_mesh
        self.mpHands = mp.solutions.hands
        self.mpPose = mp.solutions.pose
        self.mpDraw = mp.solutions.drawing_utils
        self.mpholistic = self.mpHolistic.Holistic(
            static_image_mode=staticMode,
            smooth_landmarks=landmarks,
            min_detection_confidence=detectionCon,
            min_tracking_confidence=trackingCon)
        self.lmDrawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 0, 255), thickness=-1, circle_radius=3)
        self.drawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2)

    def findHolistic(self, frame, draw=True):
        poseptArray = []
        lhandptArray = []
        rhandptArray = []
        h, w, c = frame.shape
        img_RGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        self.results = self.mpholistic.process(img_RGB)
        if self.results.pose_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(frame, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS, self.lmDrawSpec, self.drawSpec)
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                poseptArray.append([id, lm.x * w, lm.y * h, lm.z])
        if self.results.left_hand_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(frame, self.results.left_hand_landmarks, self.mpHands.HAND_CONNECTIONS, self.lmDrawSpec, self.drawSpec)
            for id, lm in enumerate(self.results.left_hand_landmarks.landmark):
                lhandptArray.append([id, lm.x * w, lm.y * h, lm.z])
        if self.results.right_hand_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(frame, self.results.right_hand_landmarks, self.mpHands.HAND_CONNECTIONS, self.lmDrawSpec, self.drawSpec)
            for id, lm in enumerate(self.results.right_hand_landmarks.landmark):
                rhandptArray.append([id, lm.x * w, lm.y * h, lm.z])
        return frame, poseptArray, lhandptArray, rhandptArray

class FaceMesh:
    def __init__(self, staticMode=False, maxFaces=1, minDetectionCon=0.5, minTrackingCon=0.5):
        self.mpDraw = mp.solutions.drawing_utils
        self.mpFaceMesh = mp.solutions.face_mesh
        self.faceMesh = self.mpFaceMesh.FaceMesh(
            static_image_mode=staticMode,
            max_num_faces=maxFaces,
            min_detection_confidence=minDetectionCon,
            min_tracking_confidence=minTrackingCon)
        self.lmDrawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 0, 255), thickness=-1, circle_radius=3)
        self.drawSpec = self.mpDraw.DrawingSpec(color=(0, 255, 0), thickness=1, circle_radius=1)

    def pubFaceMeshPoint(self, frame, draw=True):
        pointArray = []
        h, w, c = frame.shape
        imgRGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        self.results = self.faceMesh.process(imgRGB)
        if self.results.multi_face_landmarks:
            for i in range(len(self.results.multi_face_landmarks)):
                if draw:
                    self.mpDraw.draw_landmarks(frame, self.results.multi_face_landmarks[i], self.mpFaceMesh.FACE_CONNECTIONS, self.lmDrawSpec, self.drawSpec)
                for id, lm in enumerate(self.results.multi_face_landmarks[i].landmark):
                    pointArray.append([id, lm.x * w, lm.y * h, lm.z])
        return frame, pointArray

class Media_ROS(Node):
    def __init__(self):
        super().__init__("mediapipe")
        self.Joy_active = True
        self.pub_cmdVel = self.create_publisher(Twist, "/cmd_vel", 1)
        self.pub_Buzzer = self.create_publisher(UInt16, "/beep", 1)
        self.pub_img = self.create_publisher(Image, '/image', 500)

    def JoyStateCallback(self, msg):
        if not isinstance(msg, Bool): return
        self.Joy_active = not self.Joy_active
        self.pub_vel(0.0, 0.0, 0.0)

    def pub_vel(self, x, y, z):
        # Twist fields must be floats in ROS 2
        twist = Twist()
        twist.linear.x = x
        twist.linear.y = y
        twist.angular.z = z
        self.pub_cmdVel.publish(twist)

    def pub_buzzer(self, status):
        # publish() needs a UInt16 message, not a bare bool
        beep = UInt16()
        beep.data = 1 if status else 0
        self.pub_Buzzer.publish(beep)

    def RobotBuzzer(self):
        self.pub_buzzer(True)
        sleep(1)
        self.pub_buzzer(False)
        sleep(1)
        self.pub_buzzer(False)
        for i in range(2):
            self.pub_vel(0.0, 0.0, 0.0)
            sleep(0.1)

    # def pub_arm(self, joints, id=6, angle=180, runtime=500):
    #     arm_joint = ArmJoint()
    #     arm_joint.id = id
    #     arm_joint.angle = angle
    #     arm_joint.run_time = runtime
    #     if len(joints) != 0: arm_joint.joints = joints
    #     else: arm_joint.joints = []
    #     self.pubPoint.publish(arm_joint)

    def pub_imgMsg(self, frame):
        pic_base64 = base64.b64encode(frame)
        image = Image()
        size = frame.shape
        image.height = size[0]
        image.width = size[1]
        # image.channels = size[2]
        image.data = pic_base64
        self.pub_img.publish(image)

    def cancel(self):
        self.pub_cmdVel.publish(Twist())
        # Publisher.unregister() is ROS 1 API; in ROS 2 destroy the publishers instead
        self.destroy_publisher(self.pub_cmdVel)
        self.destroy_publisher(self.pub_img)
        self.destroy_publisher(self.pub_Buzzer)

class SinglePID:
    def __init__(self, P=0.1, I=0.0, D=0.1):
        self.Kp = P
        self.Ki = I
        self.Kd = D
        print("init_pid: ", P, I, D)
        self.pid_reset()

    def Set_pid(self, P, I, D):
        self.Kp = P
        self.Ki = I
        self.Kd = D
        print("set_pid: ", P, I, D)
        self.pid_reset()

    def pid_compute(self, target, current):
        self.error = target - current
        self.intergral += self.error
        self.derivative = self.error - self.prevError
        result = self.Kp * self.error + self.Ki * self.intergral + self.Kd * self.derivative
        self.prevError = self.error
        return result

    def pid_reset(self):
        self.error = 0
        self.intergral = 0
        self.derivative = 0
        self.prevError = 0
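Before wiring the detector into the car, it can be sanity-checked on its own. A minimal sketch, assuming a local webcam at index 0 and a built, sourced workspace so that the imports inside media_library.py (rclpy, yahboomcar_msgs) resolve — no node is actually spun up:

#!/usr/bin/env python3
# Standalone check of HandDetector.fingersUp — no car or topics involved.
import cv2 as cv
from yahboom_esp32ai_car.media_library import HandDetector

detector = HandDetector(maxHands=1)
cap = cv.VideoCapture(0)          # assumption: webcam at index 0
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    frame, lmList, _ = detector.findHands(frame)
    if len(lmList) != 0:
        # [thumb, index, middle, ring, pinky], 1 = extended
        print("fingers: ", detector.fingersUp(lmList))
    cv.imshow("hand", frame)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv.destroyAllWindows()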

Next, create HandCtrl.py. This is Yahboom's original source file and it has bugs; if you run it on a real car, expect to debug and tune it yourself.

#!/usr/bin/env python3
# encoding: utf-8
import threading
import cv2 as cv
import numpy as np
from yahboom_esp32ai_car.media_library import *
import time
import rclpy
from rclpy.node import Node
from std_msgs.msg import Int32, Bool, UInt16
from cv_bridge import CvBridge
from sensor_msgs.msg import Image, CompressedImage
from rclpy.time import Time
import datetime

class PoseCtrlArm(Node):
    def __init__(self, name):
        super().__init__(name)
        self.pub_Servo1 = self.create_publisher(Int32, "servo_s1", 10)
        self.pub_Servo2 = self.create_publisher(Int32, "servo_s2", 10)
        self.x = 0
        self.y = 45
        servo1_angle = Int32()
        servo2_angle = Int32()
        servo1_angle.data = self.x
        servo2_angle.data = self.y
        self.car_status = True
        self.stop_status = 0
        self.locking = False
        self.pose_detector = Holistic()
        self.hand_detector = HandDetector()
        self.pTime = self.index = 0
        self.media_ros = Media_ROS()
        self.event = threading.Event()
        self.event.set()
        # make sure the camera gimbal starts centered
        for i in range(10):
            self.pub_Servo1.publish(servo1_angle)
            self.pub_Servo2.publish(servo2_angle)
            time.sleep(0.1)

    def process(self, frame):
        # frame = cv.flip(frame, 1)
        if self.media_ros.Joy_active:
            frame, lmList, _ = self.hand_detector.findHands(frame)  # detect the hand
            if len(lmList) != 0:
                threading.Thread(target=self.hand_threading, args=(lmList,)).start()
            else:
                self.media_ros.pub_vel(0.0, 0.0, 0.0)
            self.media_ros.pub_imgMsg(frame)
        return frame

    def hand_threading(self, lmList):
        if self.event.is_set():
            self.event.clear()
            self.stop_status = 0
            self.index = 0
            fingers = self.hand_detector.fingersUp(lmList)  # which fingers are extended
            print("fingers: ", fingers)
            if sum(fingers) == 5:  # open palm: forward
                self.media_ros.pub_vel(0.3, 0.0, 0.0)
                time.sleep(0.5)
            elif sum(fingers) == 0:  # fist: backward
                self.media_ros.pub_vel(-0.3, 0.0, 0.0)
                time.sleep(0.5)
            elif sum(fingers) == 1 and fingers[1] == 1:  # index finger only: turn left
                self.media_ros.pub_vel(0.0, 0.0, 0.5)
                time.sleep(0.5)
            elif sum(fingers) == 2 and fingers[1] == 1 and fingers[2] == 1:  # index + middle: turn right
                self.media_ros.pub_vel(0.0, 0.0, -0.5)
                time.sleep(0.5)
            else:
                self.media_ros.pub_vel(0.0, 0.0, 0.0)
            self.event.set()

class MY_Picture(Node):
    def __init__(self, name):
        super().__init__(name)
        self.bridge = CvBridge()
        self.sub_img = self.create_subscription(
            CompressedImage, '/espRos/esp32camera', self.handleTopic, 1)  # image stream from the ESP32 camera
        self.pose_ctrl_arm = PoseCtrlArm('posectrlarm')
        self.last_stamp = None
        self.new_seconds = 0
        self.fps_seconds = 1

    def handleTopic(self, msg):
        self.last_stamp = msg.header.stamp
        if self.last_stamp:
            total_secs = Time(nanoseconds=self.last_stamp.nanosec, seconds=self.last_stamp.sec).nanoseconds
            delta = datetime.timedelta(seconds=total_secs * 1e-9)
            seconds = delta.total_seconds() * 100
            if self.new_seconds != 0:
                self.fps_seconds = seconds - self.new_seconds
            self.new_seconds = seconds  # keep this frame's value
        start = time.time()
        frame = self.bridge.compressed_imgmsg_to_cv2(msg)
        frame = cv.resize(frame, (640, 480))
        cv.waitKey(1)
        frame = self.pose_ctrl_arm.process(frame)
        end = time.time()
        fps = 1 / ((end - start) + self.fps_seconds)
        text = "FPS : " + str(int(fps))
        cv.putText(frame, text, (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
        # `binary` and `ManyImgs` are referenced here in the original file but never
        # defined (one of the bugs mentioned above); show the processed frame directly.
        cv.imshow('frame', frame)

def main():
    rclpy.init()
    esp_img = MY_Picture("My_Picture")
    print("start it")
    try:
        rclpy.spin(esp_img)
    except KeyboardInterrupt:
        pass
    finally:
        esp_img.destroy_node()
        rclpy.shutdown()
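A practical note: for ros2 run yahboom_esp32ai_car HandCtrl to find this script, it must be registered as a console script in the package's setup.py. A sketch of the relevant excerpt (the entry name is assumed to match the command above; check your package's actual file):

# setup.py (excerpt) — assumed registration, adjust to your package
entry_points={
    'console_scripts': [
        'HandCtrl = yahboom_esp32ai_car.HandCtrl:main',
    ],
},

After editing setup.py, rebuild with colcon build and re-source the workspace.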

As you can see, the hand is detected first, producing lmList, which is then passed into the fingersUp function. fingersUp determines which fingers are extended; an extended finger is reported as 1. The detailed code is in media_library.py (commented above): essentially it compares the x/y coordinates of the finger joints to decide whether each finger is straight. sum(fingers) counts how many fingers are extended, and fingers[] indexes individual fingers — the index finger, for example, is fingers[1].
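To make the control mapping concrete, here is the branch logic of hand_threading distilled into a pure function (an illustration only; the node itself publishes Twist messages via pub_vel rather than returning tuples):

def gesture_to_cmd(fingers):
    """fingers is [thumb, index, middle, ring, pinky], 1 = extended;
    returns (linear.x, linear.y, angular.z) as passed to pub_vel."""
    if sum(fingers) == 5:                                          # open palm "5"
        return (0.3, 0.0, 0.0)                                     # forward
    if sum(fingers) == 0:                                          # fist
        return (-0.3, 0.0, 0.0)                                    # backward
    if sum(fingers) == 1 and fingers[1] == 1:                      # "1"
        return (0.0, 0.0, 0.5)                                     # turn left
    if sum(fingers) == 2 and fingers[1] == 1 and fingers[2] == 1:  # "2"
        return (0.0, 0.0, -0.5)                                    # turn right
    return (0.0, 0.0, 0.0)                                         # anything else: stop

assert gesture_to_cmd([0, 1, 0, 0, 0]) == (0.0, 0.0, 0.5)   # matches the log above
assert gesture_to_cmd([0, 1, 1, 0, 0]) == (0.0, 0.0, -0.5)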

Flowchart (figure from the original post, not reproduced here): camera frame → hand detection (lmList) → fingersUp → gesture match → velocity command.
