ROS development: adding common CLion live templates and initial configuration (Part 5)
Python templates
py_opencv_resize图片缩放_水平方向拉伸与左右截取
# 640 x 480 -> 1280 x 480 (1280 columns, 480 rows). frame: source image; None: no explicit output size;
# fx: stretch 2x horizontally; fy: 1.0 vertically (unchanged)
frame_wide = cv2.resize(frame, None, fx=2.0, fy=1.0, interpolation=cv2.INTER_LINEAR)
# Crop the left half (rows, columns)
frame_left = frame_wide[0:480, 0:640]
# Crop the right half
frame_right = frame_wide[0:480, 640:1280]

py_ros_rospy.Time.now当前时间戳
import rospy

# Set the header timestamp
self.msg_header.stamp = rospy.Time.now()
py_roscvBridge将cv图片转成ros的Image
# The outgoing Image message needs header information
self.msg_header = Header()
# Use a fixed frame id for the image
self.msg_header.frame_id = "stereo_image"
# Set the header timestamp
self.msg_header.stamp = rospy.Time.now()
import cv2 import shlex import subprocess import yaml from os import path from std_msgs.msg import Header from sensor_msgs.msg import Image, CameraInfo from cv_bridge import CvBridge, CvBridgeError import rospy # opencv图片转data数据之前创建头信息 self.msg_header = Header() # 创建图片转换器 self.bridge = CvBridge() def publish_ros_img(self, frame, img_publisher): # 将cv图片转成ros的Image try: ros_img = self.bridge.cv2_to_imgmsg(frame, "bgr8") # image消息发送出去需要指定头信息的 ros_img.header = self.msg_header # 将其发布到指定话题 img_publisher.publish(ros_img) except CvBridgeError as e: print e # 640 x 480 -> 1280 x 480(1290列 480行) frame: 原图 None:输出图不需要 fx:水平方向拉伸2倍, fy:竖直方向拉伸1.0 (不变) frame_wide = cv2.resize(frame, None, fx=2.0, fy=1.0, interpolation=cv2.INTER_LINEAR) #截取左边部分 行,列 frame_left = frame_wide[0: 480, 0: 640] #截取右边部分 frame_right = frame_wide[0: 480, 640: 1280] # cv2.imshow("left", frame_left) # cv2.imshow("right", frame_right) else: frame_left = frame frame_right = frame # cv2.imshow("frame", frame) # opencv图片转data数据之前创建头信息 self.msg_header #指定图片id固定的值 self.msg_header.frame_id = "stereo_image" #指定头的时间戳 self.msg_header.stamp = rospy.Time.now() # 发布图片 self.publish_ros_img(frame_left, self.left_img_publisher) self.publish_ros_img(frame_right, self.right_img_publisher)py_ros_header头信息使用
from std_msgs.msg import Header

# Create the header before converting the OpenCV image to message data
msg_header = Header()
# Use a fixed frame id for the image
msg_header.frame_id = "stereo_image"
# Set the header timestamp
msg_header.stamp = rospy.Time.now()
ros_img.header = msg_header
img_publisher = rospy.Publisher("stereo/left/image_raw", Image, queue_size=1)
# Publish it on the chosen topic
img_publisher.publish(ros_img)

py_path当前python文件,同时适用于ros环境及非ros环境加载文件
from os import path

# Directory containing the current Python file
dir_name = path.dirname(path.realpath(__file__))
# Build the path to left.yaml by joining; path.join automatically decides whether dir_name needs a trailing /
file = path.join(dir_name, "./calibration/left.yaml")

py_np_std标准差
# Standard deviation
np.std(x)
# np.std(input, axis=0)  # column-wise: returns the standard deviation of each column; input is a matrix

py_np_mean平均值
# Mean
np.mean(x)
# np.mean(input, axis=0)  # column-wise: returns the mean of each column; input is a matrix

py_np_min最小值
np.min(input, axis=0)  # column-wise: returns the minimum of each column; input is a matrix

py_np_std_mean数据标准化操作 _数据分布不均匀且数据差异极大的情况下使用
# Standardization (z-score) - use when the data are unevenly distributed with very large differences
# axis=0 works column-wise; mean: column means, std: column standard deviations; input is a matrix
(input - np.mean(input, axis=0)) / np.std(input, axis=0)

py_np_max_min数据归一化操作_数据分布均匀且数据差异很小的情况下使用
# Min-max normalization - use when the data are evenly distributed with small differences
# axis=0 works column-wise; min/max are taken per column; input is a matrix
(input - np.min(input, axis=0)) / (np.max(input, axis=0) - np.min(input, axis=0))

py_np_max最大值
np.max(x)
# np.max(input, axis=0)  # column-wise: returns the maximum of each column; input is a matrix

Right-shifting discards low-order bits; shifting right by 8 bits removes the entire low byte.
0x01F4 occupies 2 bytes (1 byte = 8 bits): 0x01 is the high byte, 0xF4 the low byte; in binary it is 00000001 11110100.
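A quick sanity check of this byte split (a hypothetical snippet, not from the original post; Python 2 style to match the surrounding code):

speed = 500                     # 0x01F4
low = speed & 0x00FF            # 0xF4 - low byte
high = speed >> 8               # 0x01 - high byte
data = bytearray([high, low])   # b'\x01\xf4'
print hex(high), hex(low)       # 0x1 0xf4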
py_\xhh_16进制字符串转字节数组及字节数组拼接发送_bytearray
# Import the serial-port library
from serial import Serial
# b0b1 = b'\xEB\x90'
b2 = b'\x01'
# data = b0b1+b2+b3+b4+b5+b6+b7+b8+b9
# Write the byte array to the serial port
# self.serial.write(data)

py_\xhh_int整形&0x00ff操作去除高位取低位_int整形转字节数组_bytearray
# Import the serial-port library
from serial import Serial
# Get the low byte. bytearray([speed & 0x00ff]) converts the int to a byte array. 00000001 11110100 == 500
# Low byte of speed=500: 00000001 11110100 & 0x00ff (00000000 11111111) strips the high byte -> 00000000 11110100
b5 = bytearray([speed & 0x00ff])  # b5 holds the low byte, 0xF4
# data = b0b1+b2+b3+b4+b5+b6+b7+b8+b9
# Write the byte array to the serial port
# self.serial.write(data)

py_\xhh_int整形右移8位操作去除低位取高位_int整形转字节数组_bytearray
# Import the serial-port library
from serial import Serial
# 00000001 11110100 == 500
# High byte of speed=500: shifting 00000001 11110100 right by 8 bits drops the low byte -> 00000000 00000001
b6 = bytearray([speed >> 8])  # b6 holds the high byte, 0x01
# data = b0b1+b2+b3+b4+b5+b6+b7+b8+b9
# Write the byte array to the serial port
# self.serial.write(data)

py_\xhh_ord_字节数据转换成u编码_如a:97_串口数据,字节数据_校验和_串口数据发送校验
# Import the serial-port library
from serial import Serial
# Checksum: sum b2..b8 and keep the low byte
# Byte objects cannot be added directly; convert each to its numeric code with ord() (e.g. 'a' -> 97),
# accumulate, keep the low byte, then convert back to a byte array
b9 = bytearray([(ord(b2)+ord(b3)+ord(b4)+ord(b5)+ord(b6)+ord(b7)+ord(b8)) & 0x00ff])
# data = b0b1+b2+b3+b4+b5+b6+b7+b8+b9
# Write the byte array to the serial port
# self.serial.write(data)

py_sort链表数组排序_取前百分之10的数据
import cv2

MAX_MATCHES = 500
GOOD_MATCH_PERCENT = 0.10
# The IDE's parameter hints give the arguments in the wrong order here; don't follow them
matches = matcher.match(train_descriptor, query_descriptor, None)
# Sort by distance, ascending
matches.sort(key=lambda x: x.distance, reverse=False)
# Keep the best 10% of the matches (at least 10)
good_num = int(len(matches) * GOOD_MATCH_PERCENT)
good_num = max(good_num, 10)
matches = matches[:good_num]

py_opencv_contours_moments多边形的特征矩得到任意多边形的中心_多边形的包容盒子AABB (轴对齐包容盒)_绘制包容盒_周长, 边长几个, 面积_多边形逼近
# 多边形逼近 for cnt in contours: # 获取多边形的包容盒子AABB (轴对齐包容盒) x, y, w, h = cv2.boundingRect(cnt) # 多边形的特征矩得到任意多边形的中心 mm = cv2.moments(cnt) if mm['m00'] == 0: continue cx = mm['m10'] / mm['m00'] cy = mm['m01'] / mm['m00'] # 绘制包容盒 cv2.rectangle(dst_img_copy, (x, y), (x + w, y + h), (0, 0, 255), 3) # 周长, 边长几个, 面积, cnt_len = cv2.arcLength(cnt, True) area = cv2.contourArea(cnt) # 多边形逼近 !!!!!!! 尽可能用少的边逼近之前的多边形. approx_curve = cv2.approxPolyDP(cnt, cnt_len * 0.01, True) print(approx_curve) curve_count = approx_curve.shape[0]py_opencv_contours_boundingRect获取多边形的包容盒子AABB (轴对齐包容盒)
# Polygon approximation
for cnt in contours:
    # Get the polygon's AABB (axis-aligned bounding box)
    x, y, w, h = cv2.boundingRect(cnt)

py_opencv_contours_approxPolyDP多边形逼近_OBB (Oriented Bounding Box 有向包容盒)
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10)) ax1.set_title("dst_img") ax1.imshow(dst_img) ax2.set_title("binary") ax2.imshow(binary, cmap='gray') plt.show() # 查找轮廓 _, contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) print("找到的轮廓数量: ", len(contours)) dst_img_copy = dst_img.copy() cv2.drawContours(dst_img_copy, contours, -1, (0, 255, 255), 2) plt.imshow(dst_img_copy) plt.show() # 多边形逼近 for cnt in contours: # 获取多边形的包容盒子AABB (轴对齐包容盒) x, y, w, h = cv2.boundingRect(cnt) # 多边形的特征矩得到任意多边形的中心 mm = cv2.moments(cnt) if mm['m00'] == 0: continue cx = mm['m10'] / mm['m00'] cy = mm['m01'] / mm['m00'] # 绘制包容盒 cv2.rectangle(dst_img_copy, (x, y), (x + w, y + h), (0, 0, 255), 3) # 周长, 边长几个, 面积, cnt_len = cv2.arcLength(cnt, True) area = cv2.contourArea(cnt) # 多边形逼近 !!!!!!! 尽可能用少的边逼近之前的多边形. 官网推荐cnt_len * 0.02 approx_curve = cv2.approxPolyDP(cnt, cnt_len * 0.01, True) print(approx_curve) #得到边数 curve_count = approx_curve.shape[0] print("面积: {}, 边个数: {}".format(area, curve_count)) #是否是凸4边型 if cv2.isContourConvex(approx_curve) and 3 < curve_count <= 6 and area > 1000: cv2.drawContours(dst_img_copy, [approx_curve], -1, (255, 0, 0), 2) # 希望最小的盒子 OBB (Oriented Bounding Box 有向包容盒) !!!!!!!!!!!!!!!!!! rect = cv2.minAreaRect(cnt) # 获取盒子的顶点作为列表 box = cv2.boxPoints(rect) center = (box[0] + box[2]) / 2 # 求中点 # 把所有数据转成int整形 box = np.int0(box) # 绘制最小OBB包容盒 cv2.drawContours(dst_img_copy, [box], -1, (0, 255, 0), 2) # 绘制中心 cv2.circle(dst_img_copy, tuple(np.int0(center)), 6, (255, 0, 0), -1) print(box) plt.imshow(dst_img_copy) plt.show()py_求余弦值,已知3个点的坐标
#p0, p1, p2为3个坐标点, p0-p1 ,p2-p1 得到两个向量 d1, d2 def angle_cos(p0, p1, p2): #p0-p1 ,p2-p1 得到两个向量 d1, d2 d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float') #得到p1点的余弦值 cosQ的值 return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) ) # if len(approxCurve) == 4 and 3000 < area < 20000 and cv.isContourConvex(approxCurve): # approxCurve = approxCurve.reshape(-1, 2) # 由Nx1x2 -> Nx2 # 求出最大角的余弦值,如果接近90度,说明是矩形 # max_cos = np.max([angle_cos( approxCurve[i], approxCurve[(i+1) % 4], #approxCurve[(i+2) % 4] ) for i in range(4)]) # max_cos 判断是否在90度附近 3.14/2=1.57 # if max_cos < 0.2: # print("找到一个轮廓 {}: thrs:{} cnt: {} max_cos: {}".format(i, thrs, repr(approxCurve), max_cos)) # squares.append(approxCurve)
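As a worked equation for the snippet above: cosθ = |d1·d2| / (sqrt(d1·d1) · sqrt(d2·d2)), with d1 = p0 - p1 and d2 = p2 - p1. For a perfect right angle cosθ = 0, so the check max_cos < 0.2 accepts corners whose angle is within roughly 11.5° of 90° (arccos 0.2 ≈ 78.5°).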
py_求解一个角度值_rotationMatrixToEulerAngles将旋转矩阵转成欧拉角_转rpy
#则如果要求解一个角度值,可以通过如下方式得到旋转的角度(单位为弧度): ''' θ=atan2(sin(θ),cos(θ)) /** * 将旋转矩阵转成欧拉角 * @param R 旋转矩阵 * @return 欧拉角 */ Vec3f rotationMatrixToEulerAngles(cv::Mat &R){ assert(isRotationMatrix(R)); float sy = sqrt(R.at<double>(0, 0) * R.at<double>(0, 0) + R.at<double>(1, 0) * R.at<double>(1, 0)); bool singular = sy < 1e-6; // If float x, y, z; if (!singular) { x = atan2(R.at<double>(2, 1), R.at<double>(2, 2)); y = atan2(-R.at<double>(2, 0), sy); z = atan2(R.at<double>(1, 0), R.at<double>(0, 0)); } else { x = atan2(-R.at<double>(1, 2), R.at<double>(1, 1)); y = atan2(-R.at<double>(2, 0), sy); z = 0; } return Vec3f(x, y, z); }; '''py_rospy_get_param_从launch文件中获取数据
''' <launch> <node pkg="robot_vision" name="face_detector" type="face_detector.py" output="screen"> <remap from="input_rgb_image" to="/usb_cam/image_raw" /> <rosparam> haar_scaleFactor: 1.2 haar_minNeighbors: 2 haar_minSize: 40 haar_maxSize: 60 </rosparam> <param name="cascade_1" value="$(find robot_vision)/data/haar_detectors/haarcascade_frontalface_alt.xml" /> <param name="cascade_2" value="$(find robot_vision)/data/haar_detectors/haarcascade_profileface.xml" /> </node> </launch> ''' # 获取haar特征的级联表的XML文件,文件路径在launch文件中传入 cascade_1 = rospy.get_param("~cascade_1", "") cascade_2 = rospy.get_param("~cascade_2", "") # 使用级联表初始化haar特征检测器 self.cascade_1 = cv2.CascadeClassifier(cascade_1) self.cascade_2 = cv2.CascadeClassifier(cascade_2) # 设置级联表的参数,优化人脸识别,可以在launch文件中重新配置 self.haar_scaleFactor = rospy.get_param("~haar_scaleFactor", 1.2) self.haar_minNeighbors = rospy.get_param("~haar_minNeighbors", 2) self.haar_minSize = rospy.get_param("~haar_minSize", 40) self.haar_maxSize = rospy.get_param("~haar_maxSize", 60)py_opencv_cv_bridge_.py图像转换ros数据互转类
#!/usr/bin/env python # -*- coding: utf-8 -*- import rospy import cv2 from cv_bridge import CvBridge, CvBridgeError from sensor_msgs.msg import Image class image_converter: def __init__(self): # 创建cv_bridge,声明图像的发布者和订阅者 self.image_pub = rospy.Publisher("cv_bridge_image", Image, queue_size=1) self.bridge = CvBridge() self.image_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.callback) def callback(self,data): # 使用cv_bridge将ROS的图像数据转换成OpenCV的图像格式 try: cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") except CvBridgeError as e: print e # 在opencv的显示窗口中绘制一个圆,作为标记 (rows,cols,channels) = cv_image.shape if cols > 60 and rows > 60 : cv2.circle(cv_image, (60, 60), 30, (0,0,255), -1) # 显示Opencv格式的图像 cv2.imshow("Image window", cv_image) cv2.waitKey(3) # 再将opencv格式额数据转换成ros image格式的数据发布 try: self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8")) except CvBridgeError as e: print e if __name__ == '__main__': try: # 初始化ros节点 rospy.init_node("cv_bridge_test") rospy.loginfo("Starting cv_bridge_test node") image_converter() rospy.spin() except KeyboardInterrupt: print "Shutting down cv_bridge_test node." cv2.destroyAllWindows()py_opencv_face_detector.py人脸识别
#!/usr/bin/env python # -*- coding: utf-8 -*- import rospy import cv2 import numpy as np from sensor_msgs.msg import Image, RegionOfInterest from cv_bridge import CvBridge, CvBridgeError class faceDetector: def __init__(self): rospy.on_shutdown(self.cleanup); # 创建cv_bridge self.bridge = CvBridge() self.image_pub = rospy.Publisher("cv_bridge_image", Image, queue_size=1) # 获取haar特征的级联表的XML文件,文件路径在launch文件中传入 人脸识别文件 在深蓝的文件中 cascade_1 = rospy.get_param("~cascade_1", "") cascade_2 = rospy.get_param("~cascade_2", "") # 使用级联表初始化haar特征检测器 self.cascade_1 = cv2.CascadeClassifier(cascade_1) self.cascade_2 = cv2.CascadeClassifier(cascade_2) # 设置级联表的参数,优化人脸识别,可以在launch文件中重新配置 self.haar_scaleFactor = rospy.get_param("~haar_scaleFactor", 1.2) self.haar_minNeighbors = rospy.get_param("~haar_minNeighbors", 2) self.haar_minSize = rospy.get_param("~haar_minSize", 40) self.haar_maxSize = rospy.get_param("~haar_maxSize", 60) self.color = (50, 255, 50) # 初始化订阅rgb格式图像数据的订阅者,此处图像topic的话题名可以在launch文件中重映射 self.image_sub = rospy.Subscriber("input_rgb_image", Image, self.image_callback, queue_size=1) def image_callback(self, data): # 使用cv_bridge将ROS的图像数据转换成OpenCV的图像格式 try: cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") frame = np.array(cv_image, dtype=np.uint8) except CvBridgeError, e: print e # 创建灰度图像 grey_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # 创建平衡直方图,减少光线影响 grey_image = cv2.equalizeHist(grey_image) # 尝试检测人脸 faces_result = self.detect_face(grey_image) # 在opencv的窗口中框出所有人脸区域 if len(faces_result)>0: for face in faces_result: x, y, w, h = face cv2.rectangle(cv_image, (x, y), (x+w, y+h), self.color, 2) # 将识别后的图像转换成ROS消息并发布 self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8")) def detect_face(self, input_image): # 首先匹配正面人脸的模型 if self.cascade_1: faces = self.cascade_1.detectMultiScale(input_image, self.haar_scaleFactor, self.haar_minNeighbors, cv2.CASCADE_SCALE_IMAGE, (self.haar_minSize, self.haar_maxSize)) # 如果正面人脸匹配失败,那么就尝试匹配侧面人脸的模型 if len(faces) == 0 and self.cascade_2: faces = self.cascade_2.detectMultiScale(input_image, self.haar_scaleFactor, self.haar_minNeighbors, cv2.CASCADE_SCALE_IMAGE, (self.haar_minSize, self.haar_maxSize)) return faces def cleanup(self): print "Shutting down vision node." cv2.destroyAllWindows() if __name__ == '__main__': try: # 初始化ros节点 rospy.init_node("face_detector") faceDetector() rospy.loginfo("Face detector is started..") rospy.loginfo("Please subscribe the ROS image.") rospy.spin() except KeyboardInterrupt: print "Shutting down face detector node." cv2.destroyAllWindows()py_opencv_equalizeHist平衡直方图,减少光线影响
# Equalize the histogram to reduce the effect of lighting
grey_image = cv2.equalizeHist(grey_image)

py_opencv_motion_detector.py物体跟随类
#!/usr/bin/env python # -*- coding: utf-8 -*- import rospy import cv2 import numpy as np from sensor_msgs.msg import Image, RegionOfInterest from cv_bridge import CvBridge, CvBridgeError class motionDetector: def __init__(self): rospy.on_shutdown(self.cleanup); # 创建cv_bridge self.bridge = CvBridge() self.image_pub = rospy.Publisher("cv_bridge_image", Image, queue_size=1) # 设置参数:最小区域、阈值 self.minArea = rospy.get_param("~minArea", 500) self.threshold = rospy.get_param("~threshold", 25) self.firstFrame = None self.text = "Unoccupied" # 初始化订阅rgb格式图像数据的订阅者,此处图像topic的话题名可以在launch文件中重映射 self.image_sub = rospy.Subscriber("input_rgb_image", Image, self.image_callback, queue_size=1) def image_callback(self, data): # 使用cv_bridge将ROS的图像数据转换成OpenCV的图像格式 try: cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") frame = np.array(cv_image, dtype=np.uint8) except CvBridgeError, e: print e # 创建灰度图像 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (21, 21), 0) # 使用两帧图像做比较,检测移动物体的区域 if self.firstFrame is None: self.firstFrame = gray return #获取差分图 就是将两幅图像作差 两个图片相减,这里用的是灰度图,类型是uint8 frameDelta = cv2.absdiff(self.firstFrame, gray) thresh = cv2.threshold(frameDelta, self.threshold, 255, cv2.THRESH_BINARY)[1] thresh = cv2.dilate(thresh, None, iterations=2) binary, cnts, hierarchy= cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for c in cnts: # 如果检测到的区域小于设置值,则忽略 if cv2.contourArea(c) < self.minArea: continue # 在输出画面上框出识别到的物体 (x, y, w, h) = cv2.boundingRect(c) cv2.rectangle(frame, (x, y), (x + w, y + h), (50, 255, 50), 2) self.text = "Occupied" # 在输出画面上打当前状态和时间戳信息 cv2.putText(frame, "Status: {}".format(self.text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) # 将识别后的图像转换成ROS消息并发布 self.image_pub.publish(self.bridge.cv2_to_imgmsg(frame, "bgr8")) def cleanup(self): print "Shutting down vision node." cv2.destroyAllWindows() if __name__ == '__main__': try: # 初始化ros节点 rospy.init_node("motion_detector") rospy.loginfo("motion_detector node is started...") rospy.loginfo("Please subscribe the ROS image.") motionDetector() rospy.spin() except KeyboardInterrupt: print "Shutting down motion detector node." cv2.destroyAllWindows()py_slam_exploring_slam.py自动导航建模
#!/usr/bin/env python # -*- coding: utf-8 -*- import roslib; import rospy import actionlib from actionlib_msgs.msg import * from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal from random import sample from math import pow, sqrt class NavTest(): def __init__(self): rospy.init_node('exploring_slam', anonymous=True) rospy.on_shutdown(self.shutdown) # 在每个目标位置暂停的时间 (单位:s) self.rest_time = rospy.get_param("~rest_time", 2) # 是否仿真? self.fake_test = rospy.get_param("~fake_test", True) # 到达目标的状态 goal_states = ['PENDING', 'ACTIVE', 'PREEMPTED', 'SUCCEEDED', 'ABORTED', 'REJECTED', 'PREEMPTING', 'RECALLING', 'RECALLED', 'LOST'] # 设置目标点的位置 # 在rviz中点击 2D Nav Goal 按键,然后单击地图中一点 # 在终端中就会看到该点的坐标信息 locations = dict() locations['1'] = Pose(Point(4.589, -0.376, 0.000), Quaternion(0.000, 0.000, -0.447, 0.894)) locations['2'] = Pose(Point(4.231, -6.050, 0.000), Quaternion(0.000, 0.000, -0.847, 0.532)) locations['3'] = Pose(Point(-0.674, -5.244, 0.000), Quaternion(0.000, 0.000, 0.000, 1.000)) locations['4'] = Pose(Point(-5.543, -4.779, 0.000), Quaternion(0.000, 0.000, 0.645, 0.764)) locations['5'] = Pose(Point(-4.701, -0.590, 0.000), Quaternion(0.000, 0.000, 0.340, 0.940)) locations['6'] = Pose(Point(2.924, 0.018, 0.000), Quaternion(0.000, 0.000, 0.000, 1.000)) # 发布控制机器人的消息 self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5) # 订阅move_base服务器的消息 self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction) rospy.loginfo("Waiting for move_base action server...") # 60s等待时间限制 self.move_base.wait_for_server(rospy.Duration(60)) rospy.loginfo("Connected to move base server") # 保存机器人的在rviz中的初始位置 initial_pose = PoseWithCovarianceStamped() # 保存成功率、运行时间、和距离的变量 n_locations = len(locations) n_goals = 0 n_successes = 0 i = n_locations distance_traveled = 0 start_time = rospy.Time.now() running_time = 0 location = "" last_location = "" # 确保有初始位置 while initial_pose.header.stamp == "": rospy.sleep(1) rospy.loginfo("Starting navigation test") # 开始主循环,随机导航 while not rospy.is_shutdown(): # 如果已经走完了所有点,再重新开始排序 if i == n_locations: i = 0 sequence = sample(locations, n_locations) # 如果最后一个点和第一个点相同,则跳过 if sequence[0] == last_location: i = 1 # 在当前的排序中获取下一个目标点 location = sequence[i] # 跟踪行驶距离 # 使用更新的初始位置 if initial_pose.header.stamp == "": distance = sqrt(pow(locations[location].position.x - locations[last_location].position.x, 2) + pow(locations[location].position.y - locations[last_location].position.y, 2)) else: rospy.loginfo("Updating current pose.") distance = sqrt(pow(locations[location].position.x - initial_pose.pose.pose.position.x, 2) + pow(locations[location].position.y - initial_pose.pose.pose.position.y, 2)) initial_pose.header.stamp = "" # 存储上一次的位置,计算距离 last_location = location # 计数器加1 i += 1 n_goals += 1 # 设定下一个目标点 self.goal = MoveBaseGoal() self.goal.target_pose.pose = locations[location] self.goal.target_pose.header.frame_id = 'map' self.goal.target_pose.header.stamp = rospy.Time.now() # 让用户知道下一个位置 rospy.loginfo("Going to: " + str(location)) # 向下一个位置进发 self.move_base.send_goal(self.goal) # 五分钟时间限制 finished_within_time = self.move_base.wait_for_result(rospy.Duration(300)) # 查看是否成功到达 if not finished_within_time: #取消机器人运动,不需要运动到指定点了 self.move_base.cancel_goal() rospy.loginfo("Timed out achieving goal") else: state = self.move_base.get_state() if state == GoalStatus.SUCCEEDED: rospy.loginfo("Goal succeeded!") n_successes += 1 distance_traveled += distance rospy.loginfo("State:" + str(state)) else: 
rospy.loginfo("Goal failed with error code: " + str(goal_states[state])) # 运行所用时间 running_time = rospy.Time.now() - start_time running_time = running_time.secs / 60.0 # 输出本次导航的所有信息 rospy.loginfo("Success so far: " + str(n_successes) + "/" + str(n_goals) + " = " + str(100 * n_successes/n_goals) + "%") rospy.loginfo("Running time: " + str(trunc(running_time, 1)) + " min Distance: " + str(trunc(distance_traveled, 1)) + " m") rospy.sleep(self.rest_time) def update_initial_pose(self, initial_pose): self.initial_pose = initial_pose def shutdown(self): rospy.loginfo("Stopping the robot...") self.move_base.cancel_goal() rospy.sleep(2) self.cmd_vel_pub.publish(Twist()) rospy.sleep(1) def trunc(f, n): slen = len('%.*f' % (n, f)) return float(str(f)[:slen]) if __name__ == '__main__': try: NavTest() rospy.spin() except rospy.ROSInterruptException: rospy.loginfo("Exploring SLAM finished.")py_slam_startros_在代码中启动ros节点roslaunch和rosrun
import subprocess import rospy import rosnode class launch_demo: def __init__(self, cmd=None): self.cmd = cmd def launch(self): self.child = subprocess.Popen(self.cmd) return True def shutdown(self): self.child.terminate() self.child.wait() return True if __name__ == "__main__": rospy.init_node('launch_demo',anonymous=True) launch_nav = launch_demo(["roslaunch", "pibot_simulator", "nav.launch"]) launch_nav.launch() r = rospy.Rate(0.2) r.sleep() rospy.loginfo("switch map...") r = rospy.Rate(1) r.sleep() rosnode.kill_nodes(['map_server']) map_name = "/home/pibot/ros_ws/src/pibot_simulator/maps/blank_map_with_obstacle.yaml" map_node = subprocess.Popen(["rosrun", "map_server", "map_server", map_name, "__name:=map_server"]) while not rospy.is_shutdown(): r.sleep()上面使用python代码启动了一个PIBOT模拟器的导航,然后5s后切换了一个地图
subprocess.Popen starts a process (roslaunch or rosrun); rosnode.kill_nodes kills a running ROS node.
py_slam_定点导航
from launch_demo import launch_demo import rospy import actionlib from actionlib_msgs.msg import * from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal from nav_msgs.msg import Path from geometry_msgs.msg import PoseWithCovarianceStamped from tf_conversions import transformations from math import pi class navigation_demo: def __init__(self): self.set_pose_pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size=5) self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction) self.move_base.wait_for_server(rospy.Duration(60)) def set_pose(self, p): if self.move_base is None: return False x, y, th = p pose = PoseWithCovarianceStamped() pose.header.stamp = rospy.Time.now() pose.header.frame_id = 'map' pose.pose.pose.position.x = x pose.pose.pose.position.y = y q = transformations.quaternion_from_euler(0.0, 0.0, th/180.0*pi) pose.pose.pose.orientation.x = q[0] pose.pose.pose.orientation.y = q[1] pose.pose.pose.orientation.z = q[2] pose.pose.pose.orientation.w = q[3] self.set_pose_pub.publish(pose) return True def _done_cb(self, status, result): rospy.loginfo("navigation done! status:%d result:%s"%(status, result)) def _active_cb(self): rospy.loginfo("[Navi] navigation has be actived") def _feedback_cb(self, feedback): rospy.loginfo("[Navi] navigation feedback\r\n%s"%feedback) def goto(self, p): goal = MoveBaseGoal() goal.target_pose.header.frame_id = 'map' goal.target_pose.header.stamp = rospy.Time.now() goal.target_pose.pose.position.x = p[0] goal.target_pose.pose.position.y = p[1] q = transformations.quaternion_from_euler(0.0, 0.0, p[2]/180.0*pi) goal.target_pose.pose.orientation.x = q[0] goal.target_pose.pose.orientation.y = q[1] goal.target_pose.pose.orientation.z = q[2] goal.target_pose.pose.orientation.w = q[3] self.move_base.send_goal(goal, self._done_cb, self._active_cb, self._feedback_cb) return True def cancel(self): self.move_base.cancel_all_goals() return True if __name__ == "__main__": rospy.init_node('navigation_demo',anonymous=True) launch_nav = launch_demo(["roslaunch", "pibot_simulator", "nav.launch"]) launch_nav.launch() r = rospy.Rate(0.2) r.sleep() rospy.loginfo("set pose...") r = rospy.Rate(1) r.sleep() navi = navigation_demo() navi.set_pose([-0.7,-0.4,0]) rospy.loginfo("goto goal...") r = rospy.Rate(1) r.sleep() navi.goto([0.25,4, 90]) while not rospy.is_shutdown(): r.sleep()上面完成设置机器人位置和导航到某一位置的功能
navi.set_pose([-0.7, -0.4, 0]) sets the robot's pose on the map to position (-0.7, -0.4) with yaw = 0°.
navi.goto([0.25, 4, 90]) sends a move_base goal that drives the robot to position (0.25, 4) with yaw = 90°.
With these two interfaces it is straightforward to build applications such as multi-point navigation patrols, as sketched below.
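A minimal patrol sketch building on the navigation_demo script above (the node name and waypoint values are illustrative assumptions, not from the original post):

import rospy

# assumes the navigation_demo class defined above is available in this file
rospy.init_node('patrol_demo', anonymous=True)
navi = navigation_demo()
navi.set_pose([-0.7, -0.4, 0])            # initial pose estimate on the map

# Hypothetical waypoint list: [x, y, yaw in degrees]
waypoints = [[0.25, 4, 90], [1.0, 2.0, 0], [-0.7, -0.4, 180]]

while not rospy.is_shutdown():
    for p in waypoints:
        navi.goto(p)                      # send the goal (non-blocking, callbacks report progress)
        navi.move_base.wait_for_result()  # wait for this goal to finish before sending the next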
py_opencv_convertScaleAbs将计算sobel以及scharr算子得到的有些负值 [CV_32F位浮点型] 取绝对值(convertScaleAbs)得到正数(int 只有整数),并将数据转化到0-255之间: cv.convertScaleAbs(x_scharr)
============= 2020-12-03
# Scharr (Sobel-family) operator
# x_scharr = cv.Scharr(img, cv.CV_32F, 1, 0)
# Convert the image to 8-bit: cv.convertScaleAbs takes the absolute value of the negative responses
# and scales the data into 0-255; Sobel/Scharr results are 32-bit float (CV_32F)
x_scharr = cv.convertScaleAbs(x_scharr)
# cv.imshow("x scharr", x_scharr)
cpp_pose计算目标位姿 = ② * ④ * ① * inv(③)无序分拣生成机器臂运动轨迹[模板匹配非固定]
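Restated as a worked equation, using the numbering from the comments in the code below (this only rewrites the chain already in the code):

target pose = ② * ④ * ① * inv(③)
finalMat = extMat (②, camera extrinsics) * template2target (④, template-to-target transform from matching) * templateMat (①, template grasp pose in the camera frame) * inv(toolMat) (③, tool offset)

where each factor is a 4x4 homogeneous transform matrix.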
// 3. 创建工具的位姿 ③ 4x4的齐次变换矩阵的逆矩阵 //VisionGrab\src\util\Rotation3DUtils.h homogeneousInverse(toolMat) 求齐次矩阵的逆矩阵 const Mat &toolMatInv = homogeneousInverse(toolMat); // 4. 取出模板到目标的位姿 ④ 4x4的齐次变换矩阵 Mat template2target = getTemplate2TargetMat(); // 1. 获取模板在相机下的位姿 ① std::cout << "获取模板在相机下的位姿templateMat: \n" << templateMat << std::endl; // 2. 读取相机的外参 ② std::cout << "读取相机的外参 extMat: \n" << extMat << std::endl; // 3. 创建工具的位姿 ③ std::cout << "创建工具的位姿 toolMatInv: \n" << toolMatInv << std::endl; // 4. 取出模板到目标的位姿 ④ std::cout << "取出模板到目标的位姿 template2target: \n" << template2target << std::endl; // 4. 计算目标位姿 = ② * ④ * ① * inv(③) // 得到真实盒子的位姿= 读取相机的外参 extMat* 取出模板到目标的位姿 template2target*获取模板在相机下的位姿templateMat*工具的位姿的逆toolMatInv // ④ * ①得到真实盒子的位姿 Mat_<double> finalMat = extMat * template2target * templateMat * toolMatInv; //VisionGrab\src\util\Rotation3DUtils.h //将4x4的变换矩阵转成xyzrpy位姿 *pose = new double[6]{t.at<double>(0),t.at<double>(1),t.at<double>(2),rpy[0],rpy[1],rpy[2]}; // 机械臂运动轨迹 double *pose = convert2pose(finalMat);cpp_getToolMat③创建工具的位姿
// ③ 3. 创建工具的位姿 Mat getToolMat() { // 3. 创建工具的位姿 ③ 4x4的齐次变换矩阵 double tool_x = 0, tool_y = 0, tool_z = 220; Mat_<double> toolMat = (Mat_<double>(4, 4) << 1, 0, 0, tool_x / 1000, 0, 1, 0, tool_y / 1000, 0, 0, 1, tool_z / 1000, 0, 0, 0, 1 ); return toolMat; } // 3. 创建工具的位姿 ③ 4x4的齐次变换矩阵的逆矩阵 //VisionGrab\src\util\Rotation3DUtils.h homogeneousInverse(toolMat) 求齐次矩阵的逆矩阵 const Mat &toolMatInv = homogeneousInverse(toolMat);cpp_getTemplateMat①机械臂要抓取的位姿_ 模板的位姿
// ① 机械臂要抓取的位姿 竖抓还是横抓 1. 获取模板在相机下的位姿 ① 无序分拣第6 Mat getTemplateMat() { Mat template_RMat = Mat_<double>(3, 3); Mat template_tMat = Mat_<double>(3, 1); // ./output/template_Rt.xml 模板位姿 机械臂要抓取的位姿 竖抓还是横抓 FileStorage rtFs(templateRtPath, FileStorage::READ); rtFs["rotation"] >> template_RMat; rtFs["translation"] >> template_tMat; rtFs.release(); //VisionGrab\src\util\Rotation3DUtils.h 将旋转矩阵和平移向量转成4x4的齐次变换矩阵 输入: template_RMat:旋转矩阵 template_tMat:平移向量 return toHomogeneousMat(template_RMat, template_tMat); } // 1. 获取模板在相机下的位姿 ① 4x4的齐次变换矩阵 Mat templateMat = getTemplateMat();cpp_getTemplate2TargetMat④ 无序分拣_读取要抓取的目标盒子所在的位置及姿态转成4*4的矩阵_模板匹配后的转换矩阵转成4*4
//④ 目标盒子所在的位置及姿态 4. 取出模板到目标的位姿 ④ 无序分拣第6 Mat getTemplate2TargetMat() { Mat template_RMat = Mat_<double>(3, 3); Mat template_tMat = Mat_<double>(3, 1); // 目标盒子所在的位置及姿态 "./template2target_Rt.xml" FileStorage rtFs(template2target_Rt_filepath, FileStorage::READ); rtFs["rotation"] >> template_RMat; rtFs["translation"] >> template_tMat; rtFs.release(); //VisionGrab\src\util\Rotation3DUtils.h 将旋转矩阵和平移向量转成4x4的齐次变换矩阵 输入: template_RMat:旋转矩阵 template_tMat:平移向量 return toHomogeneousMat(template_RMat, template_tMat); } // 4. 取出模板到目标的位姿 ④ 4x4的齐次变换矩阵 Mat template2target = getTemplate2TargetMat();cpp_getExMat②读取外参文件转成4*4的矩阵_外参是轴角对数据
#include <Eigen/Geometry> #include <opencv2/opencv.hpp> #include <opencv/cxeigen.hpp> #include <iostream> #include <opencv2/opencv.hpp> #include <Robot.h> #include <Rotation3DUtils.h> #include <Gripper.h> //②外参文件 确定相机坐标 机械臂坐标 2. 读取相机的外参 ② //外参文件 确定相机坐标 机械臂坐标 无序分拣第6 //ex_calib_file_path = "./assets/calibration_ex_params.yml"; Mat getExMat() { double Angle = 0, AxisX = 0, AxisY = 0, AxisZ = 0; double TranslationX = 0, TranslationY = 0, TranslationZ = 0; //外参文件 确定相机坐标 机械臂坐标 FileStorage fs(ex_calib_file_path, FileStorage::READ); fs["Angle"] >> Angle; fs["AxisX"] >> AxisX; fs["AxisY"] >> AxisY; fs["AxisZ"] >> AxisZ; fs["TranslationX"] >> TranslationX; fs["TranslationY"] >> TranslationY; fs["TranslationZ"] >> TranslationZ; // 将轴角对 -> 旋转矩阵 + 平移矩阵 -> 齐次矩阵 Eigen::Vector3d axisMatrix(AxisX, AxisY, AxisZ); Eigen::AngleAxisd angleAxisd(Angle, axisMatrix); const Eigen::AngleAxis<double>::Matrix3 &eigenR = angleAxisd.toRotationMatrix(); Mat cvR = Mat_<double>(3, 3); cv::eigen2cv(eigenR, cvR); Mat t = (Mat_<double>(3, 1) << TranslationX / 1000, TranslationY / 1000, TranslationZ / 1000); //VisionGrab\src\util\Rotation3DUtils.h 将旋转矩阵和平移向量转成4x4的齐次变换矩阵 输入: template_RMat:旋转矩阵 template_tMat:平移向量 Mat exMat = toHomogeneousMat(cvR, t); return exMat; } // 2. 读取相机的外参 ② 4x4的齐次变换矩阵 Mat extMat = getExMat();cpp_ros_eigen_transpose转置 _1行3列
#include <Eigen/Core>

// Rotation matrix to Euler angles. 2: z, 1: y, 0: x - rotations applied in z-y-x order
Eigen::Vector3f eulerAngles = rotation.eulerAngles(2, 1, 0);
// eulerAngles.transpose(): transpose to a 1x3 row vector for printing
std::cout << "yaw(z), pitch(y), roll(x): " << eulerAngles.transpose() * 180 / M_PI << std::endl;
std::cout << "translation: " << translation.transpose() << std::endl;

cpp_pcl_templateAligment_SampleConsensusInitialAlignment匹配器_执行模板匹配[类]
#include <iostream> #include <limits> #include <fstream> #include <vector> #include <Eigen/Core> #include <pcl/point_types.h> #include <pcl/point_cloud.h> #include <pcl/io/io.h> #include <pcl/io/pcd_io.h> #include <pcl/filters/passthrough.h> #include <pcl/filters/voxel_grid.h> #include <pcl/features/normal_3d.h> #include <pcl/features/fpfh.h> #include <pcl/registration/ia_ransac.h> #include <pcl/search/kdtree.h> #include <pcl/kdtree/kdtree_flann.h> #include <pcl/visualization/pcl_visualizer.h> #include <opencv2/opencv.hpp> #include <opencv/cxeigen.hpp> //-------------------- 匹配器,执行模板匹配 专门用来进行刚体变换匹配 匹配成功的模板点云 + 模板变换到目标的变换矩阵 // 1.从文件获取所有点云 放进object_templates集合中 // 把所有的模板点云保存到列表中 std::vector<FeatureCloud> object_templates; std::ifstream input_stream(argv[1]);// 读取文件第一个参数 ./data1/object_templates.txt // 把文件中读取的每一行数据赋值给 pcd_filename std::string pcd_filename; while (input_stream.good()) {//判断.txt中是否有数据 // 读取文件里的每一行对应的pcd文件内容,存到object_templates中 std::getline(input_stream, pcd_filename); if (pcd_filename.empty() || pcd_filename.at(0) == '#') { continue; } // 加载特征点云, 并且计算表面法向量 & FPFH直方图 FeatureCloud template_cloud; // 把文件中读取的每一行数据加载特征点云, 并且计算表面法向量 template_cloud.loadInputCloud(pcd_filename); // 数据点云加入到集合中 object_templates.push_back(template_cloud); } input_stream.close(); //条件1需要依赖这个类--FeatureCloud特征点云 获取法线 与 征描述子信息特征直方图特 类 //条件2需要添加模板点云(可添加多个,必须是具有法线与特征描述子的,转成这个FeatureCloud) 设置目标点云(必须是具有法线与特征描述子的转成这个FeatureCloud) /** * * 匹配器,执行模板匹配 * 此对象专门用来进行刚体变换匹配 * 匹配成功的模板点云 + 模板变换到目标的变换矩阵 */ class TemplateAligment{ private: // FeatureCloud特征点云 获取点云的法线 与 征描述子信息特征直方图特 vector<FeatureCloud> templates_; // 特征点云 获取点云的法线 与 征描述子信息特征直方图特 FeatureCloud target_; // 初始化 实现了RANSAC姿态估计对象 SAC-IA估算对象 在这个setTargetCloud方法中初始化; 参数1:场景输入点云的类型 参数2:模板的输入点云的类型 参数3:输出点特征的直方图的类型:FPFHSignature33的估算器 pcl::SampleConsensusInitialAlignment<pcl::PointXYZ, pcl::PointXYZ, pcl::FPFHSignature33> sac_ia_; // 最小采样距离 float min_sample_distance_; // 最大的响应距离,如果发现两个点太远了,认为它没有在模型内部 float max_correspondence_distance_; // 最大迭代次数 float max_iterations_; public: TemplateAligment() : min_sample_distance_(0.05f), max_correspondence_distance_(0.01f * 0.01f), max_iterations_(500) { // 给SAC-IA估算对象设置最小采样距离 sac_ia_.setMinSampleDistance(min_sample_distance_); // 给SAC-IA估算对象设置最大的响应距离,如果发现两个点太远了,认为它没有在模型内部 sac_ia_.setMaxCorrespondenceDistance(max_correspondence_distance_); // 给SAC-IA估算对象设置最大迭代次数 sac_ia_.setMaximumIterations(max_iterations_); } // 添加模板点云 void addTemplateCloud(FeatureCloud &cloud) { templates_.push_back(cloud); } // 给估算对象SAC-IA初始化,同时传入目标点云数据及目标点云特征描述子 void setTargetCloud(FeatureCloud &target_cloud){ target_ = target_cloud; // 给估算对象SAC-IA初始化 设置target cloud ; 设置目标点云 target_cloud.getPointCloud()获取点云 xyz_ sac_ia_.setInputTarget(target_cloud.getPointCloud()); // 设置目标点云特征描述子 sac_ia_.setTargetFeatures(target_cloud.getLocalFeatures()); }; // 对所有模板进行匹配,得到结果results struct Result { // 模板对应的匹配分数 float fitness_score; // 转换矩阵 Eigen::Matrix4f transformation; EIGEN_MAKE_ALIGNED_OPERATOR_NEW }; /** * 3. * 核心代码: * 执行匹配,并将结果设置给 result * @param result * @param template_cloud */ void align(FeatureCloud &template_cloud, TemplateAligment::Result &result){ // 设置输入源 输入点云 sac_ia_.setInputSource(template_cloud.getPointCloud()); // 设置输入点云特征 sac_ia_.setSourceFeatures(template_cloud.getLocalFeatures()); PointCloud registration_output; // 执行配准 sac_ia_.align(registration_output); // 给result设置数据 分数 result.fitness_score = (float) sac_ia_.getFitnessScore(max_correspondence_distance_); // 对应的变换矩阵 模板匹配到的变换矩阵 result.transformation = sac_ia_.getFinalTransformation(); }; // 2. 
对所有模板进行匹配,得到结果results void alignAll(std::vector<Result, Eigen::aligned_allocator<Result>>& results){ results.resize(templates_.size()); for (int i = 0; i < templates_.size(); ++i) { /** 3. * 核心代码: * 执行匹配,并将结果设置给 result * @param result * @param template_cloud */ align(templates_[i], results[i]); } }; /** * 1. * 在templates_中查找与target_匹配结果最佳的点云索引 * 根据匹配分数判断(越低越好) * 查找最优解 */ int findBestAligment(TemplateAligment::Result &result){ // 对所有模板进行匹配,得到结果results std::vector<Result, Eigen::aligned_allocator<Result>> results; //2. 对所有模板进行匹配,得到结果results alignAll(results); // 对结果进行循环(匹配分数),得到分数最佳的result对象,返回对应的索引 int best_template_index = 0; float lowest_score = results[0].fitness_score; //排序 for (int i = 0; i < results.size(); ++i) { Result &result = results[i]; // 取分数最小的 if (result.fitness_score < lowest_score) { lowest_score = result.fitness_score; best_template_index = i; } } // 最佳匹配值 result = results[best_template_index]; // 最佳匹配值得索引 return best_template_index; }; }; //使用 创建匹配器,执行模板匹配 TemplateAligment template_align; // 遍历所有的模板点云object_templates集合 std::vector<FeatureCloud> object_templates; for (FeatureCloud object_templte: object_templates) { // 添加模板点云 template_align.addTemplateCloud(object_templte); } // 把目标点云封装成FeatureCloud FeatureCloud target_cloud; target_cloud.setInputCloud(cloud); // 设置目标点云 template_align.setTargetCloud(target_cloud); //得到最佳匹配模板 TemplateAligment::Result result; // 执行配准 // 返回最佳模板配准结果索引 int best_index = template_align.findBestAligment(result); // 返回最佳模板配准结果对象 FeatureCloud best_template = object_templates[best_index]; // 最佳匹配的变换,匹配分数 低于0.00002较好 std::cout << "score: " << result.fitness_score << " index: " << best_index << std::endl; std::cout << "变换矩阵:\n" << result.transformation << std::endl; Eigen::Matrix3f rotation = result.transformation.block<3, 3>(0, 0); Eigen::Vector3f translation = result.transformation.block<3, 1>(0, 3); // 旋转矩阵转欧拉角. 2:z 1:y 0:x 按zyx顺序旋转 Eigen::Vector3f eulerAngles = rotation.eulerAngles(2, 1, 0); // eulerAngles.transpose() 转置 一行3列 std::cout << "yaw(z), pitch(y), roll(x): " << eulerAngles.transpose() * 180 / M_PI << std::endl; std::cout << "translation: " << translation.transpose() << std::endl; // 对最佳匹配的模板执行以上变换 PointCloud::Ptr transformed_template_cloud(new PointCloud); //*best_template.getPointCloud() 模板点云 通过 result.transformation 变化矩阵 转换成 transformed_template_cloud点云[改变模板点云的位姿] pcl::transformPointCloud(*best_template.getPointCloud(), *transformed_template_cloud, result.transformation); // 保存变换矩阵 string template2target_Rt_filepath = "./template2target_Rt.xml"; FileStorage fs(template2target_Rt_filepath, FileStorage::WRITE); cv::Mat RMat, tMat; cv::eigen2cv(rotation, RMat); cv::eigen2cv(translation, tMat); fs << "rotation" << RMat; fs << "translation" << tMat; fs.release(); pcl::visualization::PCLVisualizer viewer("3D viewer"); // 显示目标点云 PCLHandler cloud_handler(cloud, 255, 100, 100); viewer.addPointCloud(cloud, cloud_handler, "cloud"); // 显示匹配成功的模板点云(最佳匹配) // FeatureCloud &featureCloud = object_templates.at(0); PCLHandler template_cloud_handler(best_template.getPointCloud(), 100, 255, 255); viewer.addPointCloud(best_template.getPointCloud(), template_cloud_handler, "template_cloud"); // 变换之后的模板点云(最佳匹配) PCLHandler transformed_template_cloud_handler(transformed_template_cloud, 255, 255, 255); viewer.addPointCloud(transformed_template_cloud, transformed_template_cloud_handler, "transformed_template_cloud"); viewer.addCoordinateSystem(0.5); while (!viewer.wasStopped()) {//如果viewer没有停止 viewer.spinOnce(); }cpp_ros_convert2pose 将4x4的变换矩阵转成xyzrpy位姿
cpp_opencv_convert2pose 将4x4的变换矩阵转成xyzrpy位姿
Mat_<double> finalMatUp = extMat * templateMat * toolMatUpInv;
// VisionGrab\src\util\Rotation3DUtils.h
// Convert a 4x4 transform matrix into an xyz-rpy pose:
// *pose = new double[6]{t.at<double>(0), t.at<double>(1), t.at<double>(2), rpy[0], rpy[1], rpy[2]};
// Robot-arm trajectory point
double *poseUp = convert2pose(finalMatUp);
cout << "pose x: " << pose[0] << " y: " << pose[1] << " z: " << pose[2];
cout << " r: " << pose[3] * RA2DE << " p: " << pose[4] * RA2DE << " y: " << pose[5] * RA2DE << endl;
cout << "poseUP x: " << poseUp[0] << " y: " << poseUp[1] << " z: " << poseUp[2];
cout << " r: " << poseUp[3] * RA2DE << " p: " << poseUp[4] * RA2DE << " y: " << poseUp[5] * RA2DE << endl;

cpp_opencv_mat_创建mat矩阵
// 3. Create the tool pose ③ as a 4x4 homogeneous transform matrix
double tool_x = 0, tool_y = 0, tool_z = 220;
Mat_<double> toolMat = (Mat_<double>(4, 4) <<
        1, 0, 0, tool_x / 1000,
        0, 1, 0, tool_y / 1000,
        0, 0, 1, tool_z / 1000,
        0, 0, 0, 1);

cpp_ros_toHomogeneousMat旋转矩阵和平移向量转成4x4的齐次变换矩阵
cpp_opencv_toHomogeneousMat旋转矩阵和平移向量转成4x4的齐次变换矩阵
// VisionGrab\src\util\Rotation3DUtils.h - combine a rotation matrix and a translation vector into a 4x4 homogeneous transform
// Inputs: template_RMat: rotation matrix, template_tMat: translation vector
Mat exMat = toHomogeneousMat(cvR, t);

cpp_opencv_eigen2cv矩阵转mat
// Convert an Eigen matrix to a cv::Mat
cv::eigen2cv(rotation, RMat);

cpp_ros_FileStorage保存④目标盒子所在的位置及姿态template2target_Rt.xml
// Save the transform matrix
string template2target_Rt_filepath = "./template2target_Rt.xml";
FileStorage fs(template2target_Rt_filepath, FileStorage::WRITE);
cv::Mat RMat, tMat;
// Convert the Eigen matrices to cv::Mat
cv::eigen2cv(rotation, RMat);
cv::eigen2cv(translation, tMat);
fs << "rotation" << RMat;
fs << "translation" << tMat;
fs.release();
cpp_opencv_fileStorage读取内参文件
Camera intrinsics
fx and fy can be understood as the focal length f expressed as an equivalent number of pixels along the x and y axes (each pixel's physical size is dx×dy, in mm).
Given a Canon 80D camera and lens with the following parameters: maximum resolution 6000×4000, sensor size 22.3×14.9 mm, focal length f = 35 mm, estimate the camera intrinsics fx, fy, u0, v0 from the definition of the intrinsic parameters.
Answer:
fx = f / dx = 35 / (22.3/6000) = 9417.040358744
fy = f / dy = 35 / (14.9/4000) = 9395.973154362
u0 = 6000 / 2 = 3000
v0 = 4000 / 2 = 2000
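A quick way to reproduce the estimate above (a hypothetical Python check, not part of the original post):

f_mm = 35.0
sensor_w_mm, sensor_h_mm = 22.3, 14.9
res_w, res_h = 6000, 4000
fx = f_mm / (sensor_w_mm / res_w)   # ~9417.04 - focal length in pixel units along x
fy = f_mm / (sensor_h_mm / res_h)   # ~9395.97 - focal length in pixel units along y
u0, v0 = res_w / 2.0, res_h / 2.0   # (3000.0, 2000.0) - assumes the principal point at the image center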
:::tip
Because of manufacturing tolerances and similar factors, the center of the pixel coordinate system does not necessarily coincide with the point where the optical axis crosses the image plane; that is, (u0, v0) is not exactly the center of the pixel coordinate system but some point near it, and the focal length and pixel physical size are not perfectly accurate either. An actual intrinsic calibration procedure is therefore needed to determine the camera's intrinsic matrix.
:::
cpp_opencv_fileStorage 读取内参文件
Mat cameraMatrix = Mat_<double>(3, 3); //string camera_in_params_path = "./assets/calibration_in_params.yml"; FileStorage fs(camera_in_params_path, FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; // 比如 焦距 f=35mm dx * dy每个像素的物理尺寸为单位为mm,dx像素宽度,dy像素高度分别表示每个像素在水平u和竖直v方向上的实际物理尺寸(单位mm),即每个感光芯片的实际大小。 //最高分辨率6000×4000,传感器尺寸22.3×14.9mm ; 焦距f=35mm (光心到图像平面的距离) // dx单个像素宽度=整个传感器尺寸宽度/分辨率宽 // dy单个像素高度=整个传感器尺寸高度/分辨率高 // fx:x轴方向等价的像素个数 ; fx = f(焦距) / dx(单个像素宽度 像素宽度像素的物理尺寸) = 35(焦距) / (22.3(整个传感器尺寸宽度)/6000(分辨率宽)) = 9417.040358744 // fy:y轴方向等价的像素个数 ; fy = f(焦距) / dy(单个像素高度 像素高度像素的物理尺寸) = 35(焦距) / (14.9(整个传感器尺寸高度)/4000(分辨率高)) = 9395.973154362 double fx = cameraMatrix.at<double>(0, 0);// x轴方向等价的像素个数; fx =焦距f光心到图像平面的距离) / (dx像素宽度像素的物理尺寸/分辨率宽度6000[像素数量]) double fy = cameraMatrix.at<double>(1, 1);// y轴方向等价的像素个数; fy =焦距f光心到图像平面的距离) / (dy像素高度像素的物理尺寸/分辨率高度4000[像素数量]) // 相机像素坐标系中心 相机佳能80D及镜头参数如下:最高分辨率6000×4000 //宽: 6000 相机像素坐标系中心cx=u0= 6000 / 2 = 3000 //高: 4000 相机像素坐标系中心cy=v0= 4000 / 2 = 2000 double cx = cameraMatrix.at<double>(0, 2);//相机像素坐标系中心cx=u0 double cy = cameraMatrix.at<double>(1, 2);//相机像素坐标系中心cy=v0 cout << "fx: " << fx << endl; cout << "fy: " << fy << endl; cout << "cx: " << cx << endl; cout << "cy: " << cy << endl; /* // 遍历深度图 for (int v = 0; v < depth.rows; v++) for (int u = 0; u < depth.cols; u++) { // 获取深度图中(m,n)处的值 ushort d = depth.ptr<ushort>(v)[u]; // d 可能没有值,若如此,跳过此点 if (isnan(d) && abs(d) < 0.0001) continue; // d 存在值,则向点云增加一个点 PointT p; // 计算这个点的空间坐标 p.z = double(d) / 1000; //单位是米 p.x = (u - camera_cx) * p.z / camera_fx; p.y = (v - camera_cy) * p.z / camera_fy; // 从rgb图像中获取它的颜色 // rgb是三通道的BGR格式图,所以按下面的顺序获取颜色 Vec3b bgr = rgb.at<Vec3b>(v, u); p.b = bgr[0]; p.g = bgr[1]; p.r = bgr[2]; // 把p加入到点云中 cloud->points.push_back(p); //cout << cloud->points.size() << endl; } */cpp_opencv_fileStorage读内参_将彩色图和深度图合并成点云_通过双重循环遍历
#include <iostream> #include <opencv2/opencv.hpp> #include <sstream> #include <cstdlib> #include <pcl/io/io.h> using namespace std; using namespace cv; /** * 将彩色图和深度图合并成点云 * @param matrix 相机内参矩阵3x3 * @param rgb 彩色图 * @param depth 深度图 * @param cloud 输出点云 */ static void convert(Mat &matrix, Mat &rgb, Mat &depth, PointCloud::Ptr &cloud) { // 比如 焦距 f=35mm dx * dy每个像素的物理尺寸为单位为mm,dx像素宽度,dy像素高度分别表示每个像素在水平u和竖直v方向上的实际物理尺寸(单位mm),即每个感光芯片的实际大小。 //最高分辨率6000×4000,传感器尺寸22.3×14.9mm ; 焦距f=35mm (光心到图像平面的距离) // dx单个像素宽度=整个传感器尺寸宽度/分辨率宽 // dy单个像素高度=整个传感器尺寸高度/分辨率高 // fx:x轴方向等价的像素个数 ; fx = f(焦距) / dx(单个像素宽度 像素宽度像素的物理尺寸) = 35(焦距) / (22.3(整个传感器尺寸宽度)/6000(分辨率宽)) = 9417.040358744 // fy:y轴方向等价的像素个数 ; fy = f(焦距) / dy(单个像素高度 像素高度像素的物理尺寸) = 35(焦距) / (14.9(整个传感器尺寸高度)/4000(分辨率高)) = 9395.973154362 double camera_fx = matrix.at<double>(0, 0);// x轴方向等价的像素个数; fx =焦距f光心到图像平面的距离) / (dx像素宽度像素的物理尺寸/分辨率宽度6000[像素数量]) double camera_fy = matrix.at<double>(1, 1);// y轴方向等价的像素个数; fy =焦距f光心到图像平面的距离) / (dy像素高度像素的物理尺寸/分辨率高度4000[像素数量]) // 相机像素坐标系中心 相机佳能80D及镜头参数如下:最高分辨率6000×4000 //宽: 6000 相机像素坐标系中心cx=u0= 6000 / 2 = 3000 //高: 4000 相机像素坐标系中心cy=v0= 4000 / 2 = 2000 double camera_cx = matrix.at<double>(0, 2);//相机像素坐标系中心cx=u0 double camera_cy = matrix.at<double>(1, 2);//相机像素坐标系中心cy=v0 cout << "fx: " << camera_fx << endl; cout << "fy: " << camera_fy << endl; cout << "cx: " << camera_cx << endl; cout << "cy: " << camera_cy << endl; // 遍历深度图 for (int v = 0; v < depth.rows; v++) for (int u = 0; u < depth.cols; u++) { // 获取深度图中(m,n)处的值 ushort d = depth.ptr<ushort>(v)[u]; // d 可能没有值,若如此,跳过此点 if (isnan(d) && abs(d) < 0.0001) continue; // d 存在值,则向点云增加一个点 PointT p; // 计算这个点的空间坐标 p.z = double(d) / 1000; //单位是米 p.x = (u - camera_cx) * p.z / camera_fx; p.y = (v - camera_cy) * p.z / camera_fy; // 从rgb图像中获取它的颜色 // rgb是三通道的BGR格式图,所以按下面的顺序获取颜色 Vec3b bgr = rgb.at<Vec3b>(v, u); p.b = bgr[0]; p.g = bgr[1]; p.r = bgr[2]; // 把p加入到点云中 cloud->points.push_back(p); //cout << cloud->points.size() << endl; } // 设置并保存点云 cloud->height = 1; cloud->width = cloud->points.size(); cout << "point cloud size = " << cloud->points.size() << endl; cloud->is_dense = false; } // 内参文件 const char *cameraInCailFile = "./assets/3DCameraInCailResult.xml"; int main(){ cv::Mat cameraMatrix = cv::Mat_<double>(3, 3);// 从文件加载相机内参 FileStorage paramFs(cameraInCailFile, FileStorage::READ); paramFs["cameraMatrix"] >> cameraMatrix; cv::Mat rgb; // 从相机得到RGB彩色图 cv::Mat depth; // 从相机得到depth深度图 PointCloud::Ptr pCloud = PointCloud::Ptr(new PointCloud); convert(cameraMatrix, rgb, depth, pCloud); }
cpp_opencv_fileStorage读内参_将彩色图和深度图合并成点云_通过指针遍历,并提前准备好计算矩阵
cpp_pcl_fileStorage读内参_将彩色图和深度图合并成点云_通过指针遍历,并提前准备好计算矩阵
#include <iostream> #include <opencv2/opencv.hpp> #include <sstream> #include <cstdlib> #include <pcl/io/io.h> using namespace std; using namespace cv; float qnan_ = std::numeric_limits<float>::quiet_NaN(); const char *cameraInCailFile = "./assets/3DCameraInCailResult.xml"; Eigen::Matrix<float, 1920, 1> colmap; Eigen::Matrix<float, 1080, 1> rowmap; //const short w = 512, h = 424; const short w = 1920, h = 1080; void prepareMake3D(const double cx, const double cy, const double fx, const double fy) { float *pm1 = colmap.data(); float *pm2 = rowmap.data(); for (int i = 0; i < w; i++) { *pm1++ = (i - cx + 0.5) / fx; } for (int i = 0; i < h; i++) { *pm2++ = (i - cy + 0.5) / fy; } } /** * 根据内参,合并RGB彩色图和深度图到点云 * @param cloud * @param depthMat * @param rgbMat */ void getCloud(pcl::PointCloud<pcl::PointXYZRGB>::Ptr &cloud, Mat &depthMat, Mat &rgbMat) { const float *itD0 = (float *) depthMat.ptr(); const char *itRGB0 = (char *) rgbMat.ptr(); if (cloud->size() != w * h) cloud->resize(w * h); pcl::PointXYZRGB *itP = &cloud->points[0]; bool is_dense = true; for (size_t y = 0; y < h; ++y) { const unsigned int offset = y * w; const float *itD = itD0 + offset; const char *itRGB = itRGB0 + offset * 4; const float dy = rowmap(y); for (size_t x = 0; x < w; ++x, ++itP, ++itD, itRGB += 4) { const float depth_value = *itD / 1000.0f; if (!isnan(depth_value) && abs(depth_value) >= 0.0001) { const float rx = colmap(x) * depth_value; const float ry = dy * depth_value; itP->z = depth_value; itP->x = rx; itP->y = ry; itP->b = itRGB[0]; itP->g = itRGB[1]; itP->r = itRGB[2]; } else { itP->z = qnan_; itP->x = qnan_; itP->y = qnan_; itP->b = qnan_; itP->g = qnan_; itP->r = qnan_; is_dense = false; } } } cloud->is_dense = is_dense; } int main(){ Mat cameraMatrix = cv::Mat_<double>(3, 3); FileStorage paramFs(cameraInCailFile, FileStorage::READ); paramFs["cameraMatrix"] >> cameraMatrix; // 内参数据 // 比如 焦距 f=35mm dx * dy每个像素的物理尺寸为单位为mm,dx像素宽度,dy像素高度分别表示每个像素在水平u和竖直v方向上的实际物理尺寸(单位mm),即每个感光芯片的实际大小。 //最高分辨率6000×4000,传感器尺寸22.3×14.9mm ; 焦距f=35mm (光心到图像平面的距离) // dx单个像素宽度=整个传感器尺寸宽度/分辨率宽 // dy单个像素高度=整个传感器尺寸高度/分辨率高 // fx:x轴方向等价的像素个数 ; fx = f(焦距) / dx(单个像素宽度 像素宽度像素的物理尺寸) = 35(焦距) / (22.3(整个传感器尺寸宽度)/6000(分辨率宽)) = 9417.040358744 // fy:y轴方向等价的像素个数 ; fy = f(焦距) / dy(单个像素高度 像素高度像素的物理尺寸) = 35(焦距) / (14.9(整个传感器尺寸高度)/4000(分辨率高)) = 9395.973154362 double fx = cameraMatrix.at<double>(0, 0);// x轴方向等价的像素个数; fx =焦距f光心到图像平面的距离) / (dx像素宽度像素的物理尺寸/分辨率宽度6000[像素数量]) double fy = cameraMatrix.at<double>(1, 1);// y轴方向等价的像素个数; fy =焦距f光心到图像平面的距离) / (dy像素高度像素的物理尺寸/分辨率高度4000[像素数量]) // 相机像素坐标系中心 相机佳能80D及镜头参数如下:最高分辨率6000×4000 //宽: 6000 相机像素坐标系中心cx=u0= 6000 / 2 = 3000 double cx = cameraMatrix.at<double>(0, 2);//相机像素坐标系中心cx=u0 //高: 4000 相机像素坐标系中心cy=v0= 4000 / 2 = 2000 double cy = cameraMatrix.at<double>(1, 2);//相机像素坐标系中心cy=v0 // 提前准备计算所需参数 prepareMake3D(cx, cy, fx, fy); cv::Mat rgbMat; // 从相机得到RGB彩色图 cv::Mat depthMat; // 从相机得到depth深度图 pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>()); getCloud(cloud, depthMat, rgbMat) }cpp_opencv_fileStorage读内参_将彩色图和深度图合并成点云_CloudMaker.h
cpp_pcl_fileStorage读内参_将彩色图和深度图合并成点云_CloudMaker.h
// // Created by ty on 20-9-30. // #ifndef VISIONGRAB_CLOUDMAKER_H #define VISIONGRAB_CLOUDMAKER_H #include <opencv2/opencv.hpp> #include <pcl/io/io.h> #include <pcl/point_types.h> using namespace std; using namespace cv; typedef pcl::PointXYZRGB PointT; typedef pcl::PointCloud<PointT> PointCloud; /** * 相机内参 * * 合成点云(彩色图,深度图) */ class CloudMaker { public: Mat cameraMatrix; explicit CloudMaker(Mat& cameraMatrix) { this->cameraMatrix = move(cameraMatrix); } /** * 将彩色和深度图转成点云图 * @param color * @param depth * @param cloud */ void convert(Mat &color, Mat &depth, PointCloud::Ptr &cloud){ // 比如 焦距 f=35mm dx * dy每个像素的物理尺寸为单位为mm,dx像素宽度,dy像素高度分别表示每个像素在水平u和竖直v方向上的实际物理尺寸(单位mm),即每个感光芯片的实际大小。 //最高分辨率6000×4000,传感器尺寸22.3×14.9mm ; 焦距f=35mm (光心到图像平面的距离) // dx单个像素宽度=整个传感器尺寸宽度/分辨率宽 // dy单个像素高度=整个传感器尺寸高度/分辨率高 // fx:焦距f在x轴的像素个数,焦距f与x轴像素个数比值 ; fx = f(焦距) / dx(单个像素宽度 像素宽度像素的物理尺寸) = 35(焦距) / (22.3(整个传感器尺寸宽度)/6000(分辨率宽)) = 9417.040358744 // fy:焦距f在y轴方向等价的像素个数 ; fy = f(焦距) / dy(单个像素高度 像素高度像素的物理尺寸) = 35(焦距) / (14.9(整个传感器尺寸高度)/4000(分辨率高)) = 9395.973154362 double fx = cameraMatrix.at<double>(0, 0);// x轴方向等价的像素个数; fx =焦距f光心到图像平面的距离) / (dx像素宽度像素的物理尺寸/分辨率宽度6000[像素数量]) double fy = cameraMatrix.at<double>(1, 1);// y轴方向等价的像素个数; fy =焦距f光心到图像平面的距离) / (dy像素高度像素的物理尺寸/分辨率高度4000[像素数量]) // 相机像素坐标系中心 相机佳能80D及镜头参数如下:最高分辨率6000×4000 //宽: 6000 相机像素坐标系中心cx=u0= 6000 / 2 = 3000 double cx = cameraMatrix.at<double>(0, 2);//相机像素坐标系中心cx=u0 //高: 4000 相机像素坐标系中心cy=v0= 4000 / 2 = 2000 double cy = cameraMatrix.at<double>(1, 2);//相机像素坐标系中心cy=v0 std::cout << "fx: " << fx << std::endl; std::cout << "fy: " << fy << std::endl; std::cout << "cx: " << cx << std::endl; std::cout << "cy: " << cy << std::endl; for (int v = 0; v < depth.rows; ++v) { for (int u = 0; u < depth.cols; ++u) { // 获取深度图中对应 的值单位(MM) float类型需要与深度相机取得深度值类型 cv_32FC1 一致, cv_32FC1=4个字节的 float存取(4*8=32) 不能用ushort;数据类型不对会出金字塔点云问题 float d = depth.ptr<float>(v)[u]; //强转成单位(米) float depth_value = float(d) / 1000.0f; //isnan(depth_value):判断是否是非数字 abs(depth_value):值是否非常小, 都舍弃掉 if (isnan(depth_value) || abs(depth_value) < 0.0001) { continue; } // d存在且有意义, 给点云添加一个点 计算好的单位都是(米) PointT p; p.z = depth_value; p.x = (u - cx) * p.z / fx; p.y = (v - cy) * p.z / fy; // 按照bgr顺序从彩色图中取颜色值; Vec3b:这个类型是不对的用Vec4b Vec4b bgr = color.at<Vec4b>(v, u); p.b = bgr[0]; p.g = bgr[1]; p.r = bgr[2]; cloud->points.push_back(p); } } // 配置点云参数 cloud->height = 1; cloud->width = cloud->points.size(); cloud->is_dense = false; std::cout << "cloud.size: " << cloud->points.size() << std::endl; }; }; #endif //VISIONGRAB_CLOUDMAKER_H // 使用 #include <iostream> #include <opencv2/opencv.hpp> #include <KinectCamera.h> #include <CloudMaker.h> #include <pcl/io/io.h> #include <pcl/io/pcd_io.h> #include <pcl/point_types.h> string camera_in_params_path = "./assets/calibration_in_params.yml"; /** * 采集彩色图片,采集深度图片 * * 根据内参,实时生成点云图 */ int main(int argc, char *argv[]) { //读取内参文件 Mat cameraMatrix = Mat_<double>(3, 3); FileStorage fs(camera_in_params_path, FileStorage::READ); fs["cameraMatrix"] >> cameraMatrix; // double fx = cameraMatrix.at<double>(0, 0); // double fy = cameraMatrix.at<double>(1, 1); // double cx = cameraMatrix.at<double>(0, 2); // double cy = cameraMatrix.at<double>(1, 2); //创建点云 PointCloud::Ptr cloud(new PointCloud()); //创建根据内参,实时生成点云图对象 CloudMaker cloudMaker(cameraMatrix); Mat colorMat, depthMat;//从相机读取 // 生成点云 colorMat:彩色图 depthMat:深度图 cloud:点云 cloudMaker.convert(colorMat, depthMat, cloud); std::cout << "开始保存数据" << std::endl; // 保存彩图,深度图 stringstream color_ss; color_ss << output_dir << "/camera_color_" << index << ".jpg"; bool 
rst = imwrite(color_ss.str(), colorMat); if (!rst) { std::cerr << "目标目录不存在,或不可写入" << output_dir << std::endl; break; } // 保存彩图,深度图 stringstream depth_ss; depth_ss << output_dir << "/camera_depth_" << index << ".xml"; FileStorage fs(depth_ss.str(), FileStorage::WRITE); fs << "depth" << depthMat; fs.release(); // 保存点云到文件 pcl::io::savePCDFile("./output/pcd_cloud.pcd", *cloud, true); std::cout << "点云保存成功" << std::endl; }
cpp_opencv_fileStorage 保存彩图,深度图,点云
cpp_pcl_fileStorage 保存彩图,深度图,点云
if (action == ACTION_SPACE) { std::cout << "开始保存数据" << std::endl; // 保存彩图,深度图 stringstream color_ss; color_ss << output_dir << "/camera_color_" << index << ".jpg"; bool rst = imwrite(color_ss.str(), colorMat); if (!rst) { std::cerr << "目标目录不存在,或不可写入" << output_dir << std::endl; break; } stringstream depth_ss; depth_ss << output_dir << "/camera_depth_" << index << ".xml"; FileStorage fs(depth_ss.str(), FileStorage::WRITE); fs << "depth" << depthMat; fs.release(); // 生成点云 stringstream cloud_ss; cloud_ss << output_dir << "/table_scene_" << index << ".pcd"; //默认保存可视化 ASCII的值,true:保存成Binary 二进制 不可读的,更加高效的读写 pcl::io::savePCDFile(cloud_ss.str(), *cloud, true); index ++; std::cout << "生成彩色图,深度图,点云图成功!" << std::endl; //产生拍照的感觉,图片会显示黑色胶卷图 bitwise_not(colorMat, colorMat); // pcl::io::savePCDFileASCII("./output/pcd_ascii.pcd", cloud);//保存成 ASCII 可读的 // pcl::io::savePCDFileBinary("./output/pcd_binary.pcd", cloud);//保存成Binary 二进制 不可读的,更加高效的读写 // pcl::io::savePCDFileBinaryCompressed("./output/pcd_binary_compressed.pcd", cloud); //默认保存可视化 ASCII的值,true:保存成Binary 二进制 不可读的,更加高效的读写 // pcl::io::savePCDFile("./output/pcd_save.pcd", cloud, true); // savePCDFileBinary }cpp_pcl_depth.ptr<float>(v)[u]取出深度值
// Read the corresponding value from the depth image
float d = depth.ptr<float>(v)[u];
float depth_value = float(d) / 1000.0f;
if (isnan(depth_value) || abs(depth_value) < 0.0001) {
    continue;
}

cpp_pcl_depth_value相机内参计算公式
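The snippet below implements the standard pinhole back-projection: for a pixel (u, v) with depth z (in meters),

x = (u - cx) * z / fx
y = (v - cy) * z / fy

where (cx, cy) is the principal point and fx, fy are the focal lengths in pixel units.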
// d exists and is meaningful: add a point to the cloud
PointT p;
p.z = depth_value;
p.x = (u - cx) * p.z / fx;
p.y = (v - cy) * p.z / fy;
// Read the color from the color image in BGR order
Vec4b bgr = color.at<Vec4b>(v, u);
p.b = bgr[0];
p.g = bgr[1];
p.r = bgr[2];
cloud->points.push_back(p);

cpp_pcl_savePCDFile保存点云图
pcl::io::savePCDFileASCII("./output/pcd_ascii.pcd", cloud);               // save as human-readable ASCII
pcl::io::savePCDFileBinary("./output/pcd_binary.pcd", cloud);             // save as binary: not human-readable, faster to read/write
pcl::io::savePCDFileBinaryCompressed("./output/pcd_binary_compressed.pcd", cloud);
// Default is ASCII; passing true saves as binary (not human-readable, more efficient I/O)
pcl::io::savePCDFile("./output/pcd_save.pcd", cloud, true);               // savePCDFileBinary

cmake_eigen3
include_directories(/usr/include/eigen3)

launch_tf_复杂类型_立体影像处理_remap双目相机标定
<launch> <!--相机驱动--> <node pkg="stereo_camera" type="stereo_camera.py" name="stereo_cam" output="screen"> <param name="cam_id" value="2"/> </node> <!--立体影像处理 ROS_NAMESPACE=stereo rosrun stereo_image_proc stereo_image_proc--> <group ns="stereo"> <node pkg="stereo_image_proc" type="stereo_image_proc" name="stereo_image_proc" output="screen"/> </group> <!-- rviz rosrun rviz rviz -d src/stereo_camera/rviz/camera3d.rviz --> <node pkg="rviz" type="rviz" name="rviz" args="-d $(find stereo_camera)/rviz/camera3d.rviz" /> <!-- tf转换 rosrun tf static_transform_publisher 0 0 1 0 0 -1.57 map stereo_image 100 --> <node pkg="tf" type="static_transform_publisher" name="tf_map_cam" args="0 0 1 0 0 -1.57 map stereo_image 100" /> <!-- <node pkg="rbx1_bringup" type="odom_ekf.py" name="odom_ekf" output="screen"> <remap from="input" to="/robot_pose_ekf /odom_combined"/> <remap from="output" to="/odom_ekf"/> </node> --> </launch> <launch> <node pkg="rbx1_bringup" type="odom_ekf.py" name="odom_ekf" output="screen"> <remap from="input" to="/robot_pose_ekf /odom_combined"/> <remap from="output" to="/odom_ekf"/> </node> </launch>
cpp_unsigned_无符号的char类型
// Unsigned char type
unsigned char data[10];

cpp_sizeof取数组的长度
// Unsigned char type
unsigned char data[10];
// Number of elements in the array
sizeof(data) / sizeof(unsigned char)

cpp_\xhh_char串口发送数据_无符号char数组_&0x00ff操作取低位_右移8位操作取数据高位_校验和
void Gripper::gripperCatch(int speed, int power) { if (speed > 1000) { speed = 1000; } if (speed < 1) { speed = 1; } if (power > 1000) { power = 1000; } if (power < 30) { power = 30; } std::cout << "夹取: speed-" << speed << " power-"<< power << std::endl; int sendSize = 10; u_char *data = new u_char[sendSize]; //unsigned char data[10]; //B0-B1: 帧头 data[0] = 0xEB; data[1] = 0x90; //B2: ID号 data[2] = 0x01; //B3: 数据体长度(b4-b8) data[3] = 0x05; //B4: 指令号 data[4] = 0x10; //B5-B8 //取speed=500 的低位,00000001 11110100 & 0x00ff==00000000 11111111分离去除高位 =00000000 11110100 data[5] = speed & 0x00ff;//b5放的是低位 F4 //取speed=500 的高位,00000001 11110100 右移8位分离去除低位 =00000000 0000001 data[6] = speed >> 8;//b6放的是高位 01 data[7] = power & 0x00ff; data[8] = power >> 8; //B9(校验位 (b2 + b3 + ... + b8) & 0xFF) data[9] = (data[2] + data[3] + data[4] + data[5] + data[6] + data[7] + data[8]) & 0xFF; rs232->Write(data, sendSize); }cpp_opencv旋转矩阵求逆_转置_平移求逆
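For the snippet below: a rotation matrix R is orthogonal, so R⁻¹ = Rᵀ, which is why the code uses the transpose instead of an explicit inverse. For a rigid transform with rotation R and translation t, the inverse has rotation Rᵀ and translation -Rᵀ·t, which is exactly the t_inv = -Rt * t line.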
// Read the end-effector rotation matrix
Mat R = R_gripper2base[i];
// Transpose the rotation matrix; for a rotation matrix the transpose equals the inverse, so the transpose is used here
Mat Rt = R.t();
R_base2gripper.push_back(Rt);
// Read the end-effector translation vector
Mat t = t_gripper2base[i];
// A pure translation would just be negated (-t); because there is also a rotation, it must be left-multiplied by the inverted rotation
Mat t_inv = -Rt * t;

cpp_opencv_ros机械臂pose转欧拉角再转旋转矩阵_转平移矩阵
double* pose = poseMap[path]; //读取机械臂运行的pose (x,y,z,r,p,y) // 旋转矩阵 RPY -> Rotation // rpy转欧拉角 cv::Vec3f eulerAngles(pose[3], pose[4], pose[5]); //欧拉角转旋转矩阵 const Mat &R = eulerAnglesToRotationMatrix(eulerAngles); std::cout << "eulerAngles: " << eulerAngles << std::endl; std::cout << "R: \n" << R << std::endl; // 转平移矩阵 Mat t = (Mat_<double>(3, 1) << pose[0], pose[1], pose[2]); std::cout << "t: " << t << std::endl; R_gripper2base.push_back(R); t_gripper2base.push_back(t / 1000); /** * 欧拉角转旋转矩阵 * @param theta * @return */ Mat eulerAnglesToRotationMatrix(cv::Vec3f &theta){ // X轴旋转矩阵 Mat R_x = (Mat_<double>(3, 3) << 1, 0, 0, 0, cos(theta[0]), -sin(theta[0]), 0, sin(theta[0]), cos(theta[0]) ); // Y轴旋转矩阵 Mat R_y = (Mat_<double>(3, 3) << cos(theta[1]), 0, sin(theta[1]), 0, 1, 0, -sin(theta[1]), 0, cos(theta[1])); // Z轴旋转矩阵 Mat R_z = (Mat_<double>(3, 3) << cos(theta[2]), -sin(theta[2]), 0, sin(theta[2]), cos(theta[2]) , 0, 0, 0, 1 ); Mat R = R_z * R_y * R_x; return R; }cpp_opencv_rodrigues罗德里格斯变换,可以将_旋转矩阵和旋转向量进行互转
// Receives the result (position + orientation of the calibration board in the camera frame)
Mat rvec = Mat::zeros(3, 1, CV_64FC1);  // rotation vector (radians)
Mat R;
// rvec (rotation vector) -> rotation matrix R
cv::Rodrigues(rvec, R);  // The Rodrigues transform converts between rotation matrix and rotation vector: given a matrix it outputs a vector, given a vector it outputs a matrix

cpp_求解一个角度值_rotationMatrixToEulerAngles将旋转矩阵转成欧拉角_转rpy
//则如果要求解一个角度值,可以通过如下方式得到旋转的角度(单位为弧度): //θ=atan2(sin(θ),cos(θ)) /** * 将旋转矩阵转成欧拉角 * @param R 旋转矩阵 * @return 欧拉角 */ Vec3f rotationMatrixToEulerAngles(cv::Mat &R){ assert(isRotationMatrix(R)); float sy = sqrt(R.at<double>(0, 0) * R.at<double>(0, 0) + R.at<double>(1, 0) * R.at<double>(1, 0)); bool singular = sy < 1e-6; // If float x, y, z; if (!singular) { x = atan2(R.at<double>(2, 1), R.at<double>(2, 2)); y = atan2(-R.at<double>(2, 0), sy); z = atan2(R.at<double>(1, 0), R.at<double>(0, 0)); } else { x = atan2(-R.at<double>(1, 2), R.at<double>(1, 1)); y = atan2(-R.at<double>(2, 0), sy); z = 0; } return Vec3f(x, y, z); };cpp_rospy_get_param_从launch文件中获取数据
/* <launch> <node pkg="robot_vision" name="face_detector" type="face_detector.py" output="screen"> <remap from="input_rgb_image" to="/usb_cam/image_raw" /> <rosparam> haar_scaleFactor: 1.2 haar_minNeighbors: 2 haar_minSize: 40 haar_maxSize: 60 </rosparam> <param name="cascade_1" value="$(find robot_vision)/data/haar_detectors/haarcascade_frontalface_alt.xml" /> <param name="cascade_2" value="$(find robot_vision)/data/haar_detectors/haarcascade_profileface.xml" /> </node> </launch> // 获取haar特征的级联表的XML文件,文件路径在launch文件中传入 cascade_1 = rospy.get_param("~cascade_1", "") cascade_2 = rospy.get_param("~cascade_2", "") // 使用级联表初始化haar特征检测器 self.cascade_1 = cv2.CascadeClassifier(cascade_1) self.cascade_2 = cv2.CascadeClassifier(cascade_2) // 设置级联表的参数,优化人脸识别,可以在launch文件中重新配置 self.haar_scaleFactor = rospy.get_param("~haar_scaleFactor", 1.2) self.haar_minNeighbors = rospy.get_param("~haar_minNeighbors", 2) self.haar_minSize = rospy.get_param("~haar_minSize", 40) self.haar_maxSize = rospy.get_param("~haar_maxSize", 60) */cpp_slam里程计计算
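The odometry integration below follows the standard differential-drive equations (restated from the code, with L the wheel separation ROBOT_LENGTH):

v = (v_right + v_left) / 2
ω = (v_right - v_left) / L
Δx = (v·cosθ - v_y·sinθ)·Δt,  Δy = (v·sinθ + v_y·cosθ)·Δt,  Δθ = ω·Δt

For a differential-drive base v_y is zero, so the v_y terms drop out.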
//L:\ROS Kinetic 古月(深蓝学院)\第6讲:机器人SLAM与自主导航\ROS理论与实践_6.机器人SLAM与自主导航_代码\sources\catkin_ws\src\mbot_bringup\src
// Read the wheel velocities; the robot's linear and angular velocity are used to estimate its pose (position and heading)
for (i = 0; i < 4; i++) {
    vel_left.odometry_char[i]  = buf[i + 5];
    vel_right.odometry_char[i] = buf[i + 9];
}
// Integrate the odometry
vx_  = (vel_right.odoemtry_float + vel_left.odoemtry_float) / 2 / 1000;
vth_ = (vel_right.odoemtry_float - vel_left.odoemtry_float) / ROBOT_LENGTH;

curr_time = ros::Time::now();
double dt = (curr_time - last_time_).toSec();
double delta_x  = (vx_ * cos(th_) - vy_ * sin(th_)) * dt;
double delta_y  = (vx_ * sin(th_) + vy_ * cos(th_)) * dt;
double delta_th = vth_ * dt;

x_  += delta_x;
y_  += delta_y;
th_ += delta_th;
last_time_ = curr_time;

return true;
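Once x_, y_ and th_ have been integrated, the pose is typically published as a nav_msgs/Odometry message. A hedged sketch (odom_pub_ is a hypothetical ros::Publisher created elsewhere in the node; the frame names follow the usual odom/base_link convention):

#include <nav_msgs/Odometry.h>
#include <tf/transform_datatypes.h>

nav_msgs::Odometry odom;
odom.header.stamp = curr_time;
odom.header.frame_id = "odom";
odom.child_frame_id = "base_link";
// integrated pose
odom.pose.pose.position.x = x_;
odom.pose.pose.position.y = y_;
odom.pose.pose.orientation = tf::createQuaternionMsgFromYaw(th_);
// current velocities
odom.twist.twist.linear.x = vx_;
odom.twist.twist.angular.z = vth_;
odom_pub_.publish(odom);   // odom_pub_: ros::Publisher, e.g. nh.advertise<nav_msgs::Odometry>("odom", 50)

cpp_struct结构体按1个字节统计_#pragma pack(1)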
// #pragma pack(1) makes the compiler pack struct members on 1-byte boundaries (no padding)
#include <cstdint>
#include <cstdio>

#pragma pack(1)
struct TXProtool {
    uint8_t head0;   // 1 byte
    short   type;    // 2 bytes
    uint8_t length;  // 1 byte
};

// print the struct size
printf("protocol=%zu \r\n", sizeof(TXProtool));
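A quick self-contained check of the effect (a sketch; PackedProtocol and DefaultProtocol are illustrative names): with 1-byte packing the struct above typically occupies 4 bytes, with default alignment 6.

#include <cstdint>
#include <cstdio>

#pragma pack(push, 1)          // start 1-byte packing
struct PackedProtocol {
    uint8_t head0;             // 1 byte
    int16_t type;              // 2 bytes
    uint8_t length;            // 1 byte
};
#pragma pack(pop)              // restore the previous packing

struct DefaultProtocol {       // same members, default alignment (padding inserted)
    uint8_t head0;
    int16_t type;
    uint8_t length;
};

int main() {
    printf("packed=%zu default=%zu\n", sizeof(PackedProtocol), sizeof(DefaultProtocol));  // typically 4 and 6
    return 0;
}

cmake_qt所有cmakelist拷贝到项目cmakelist中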
# 自动调用moc,uic,rcc处理qt的扩展部分 #1.做信号曹的配置 set(CMAKE_AUTOMOC ON) #uic set(CMAKE_AUTOUIC ON) #rcc set(CMAKE_AUTORCC ON) set(CMAKE_INCLUDE_CURRENT_DIR ON) # 设置find__xxx的路径 找qt环境的目录 set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/qt) # 设置输出的路径 SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build/bin" CACHE PATH "Runtime directory" FORCE) SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build/libs" CACHE PATH "Library directory" FORCE) SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build/libs" CACHE PATH "Archive directory" FORCE) # qt plugin 配置一个插件,我们没有装qt的话直接拷贝过来它有个插件找不到,装的时候会把插件放在环境变量里面,如果没有装qt的话就找不到,所以要指定file # 告诉它插件在哪个地方 Plugins=${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/qt/plugins file( WRITE ${CMAKE_CURRENT_SOURCE_DIR}/build/bin/qt.conf "[Paths] #Prefix=../lib/Qt #Binaries=bin #Libraries=lib Plugins=${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/qt/plugins #Imports=imports #Qml2Imports=qml" ) # 加载库 find_package(Qt5Core REQUIRED) find_package(Qt5Gui REQUIRED) find_package(Qt5Widgets REQUIRED) find_package(Qt5OpenGL REQUIRED) find_package(Qt5Network REQUIRED) # qt加载的库 设置qt libraries SET(QT_LIBRARIES ${Qt5Core_LIBRARIES} ${Qt5Gui_LIBRARIES} ${Qt5Widgets_LIBRARIES} ${Qt5OpenGL_LIBRARIES} ${Qt5Network_LIBRARIES} ${Qt5Xml_LIBRARIES} ${Qt5Qml_LIBRARIES}) # 指定包含头文件目录 include_directories( ${Qt5Core_INCLUDE_DIRS} ${Qt5Gui_INCLUDE_DIRS} ${Qt5Widgets_INCLUDE_DIRS} ${Qt5Network_INCLUDE_DIRS}) #线程 固定配置 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread -lpthread -Wl,--no-as-needed -g3 -Wall") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")CMAKE_qt_set自动调用moc,uic,rcc处理qt的扩展部分#1.做信号曹的配置set(CMAKE_AUTOMOC ON)
# Automatically run moc, uic and rcc on the Qt-specific parts
# 1. signal/slot support (moc)
set(CMAKE_AUTOMOC ON)
# uic
set(CMAKE_AUTOUIC ON)
# rcc
set(CMAKE_AUTORCC ON)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
cmake_set设置find__xxx的路径_查找依赖库_1_set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/qt)
# 设置find__xxx的路径 找qt环境的目录 set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/qt)cmake_build_out设置可执行文件输出路径SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build/bin" CACHE PATH "Runtime directory" FORCE)
# 设置输出的路径 SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build/bin" CACHE PATH "Runtime directory" FORCE) SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build/libs" CACHE PATH "Library directory" FORCE) SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build/libs" CACHE PATH "Archive directory" FORCE)cmake_file_设置插件查找路径Plugins=${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/qt/plugins
# qt plugin 配置一个插件,我们没有装qt的话直接拷贝过来它有个插件找不到,装的时候会把插件放在环境变量里面,如果没有装qt的话就找不到,所以要指定file # 告诉它插件在哪个地方 Plugins=${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/qt/plugins file( WRITE ${CMAKE_CURRENT_SOURCE_DIR}/build/bin/qt.conf "[Paths] #Prefix=../lib/Qt #Binaries=bin #Libraries=lib Plugins=${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/qt/plugins #Imports=imports #Qml2Imports=qml" )cmake_find_package_2_(Qt5Core REQUIRED)查找库_加载库
# 加载库 find_package(Qt5Core REQUIRED) find_package(Qt5Gui REQUIRED) find_package(Qt5Widgets REQUIRED) find_package(Qt5OpenGL REQUIRED) find_package(Qt5Network REQUIRED)cmake_SET_LIBRARIES_3_定义一个变量XX_LIBRARIES把库都保存下来的库LIBRARIES_SET(QT_LIBRARIES ${Qt5Core_LIBRARIES} ${Qt5Gui_LIBRARIES})
# qt加载的库 设置qt libraries SET(QT_LIBRARIES ${Qt5Core_LIBRARIES} ${Qt5Gui_LIBRARIES} ${Qt5Widgets_LIBRARIES} ${Qt5OpenGL_LIBRARIES} ${Qt5Network_LIBRARIES} ${Qt5Xml_LIBRARIES} ${Qt5Qml_LIBRARIES})cmake_include_directories_4_指定包含头文件目录include_directories( ${Qt5Core_INCLUDE_DIRS} ${Qt5Gui_INCLUDE_DIRS})
# 指定包含头文件目录 include_directories( ${Qt5Core_INCLUDE_DIRS} ${Qt5Gui_INCLUDE_DIRS} ${Qt5Widgets_INCLUDE_DIRS} ${Qt5Network_INCLUDE_DIRS})cmake_add_subdirectory设置子目录add_subdirectory(src)
# 子目录 add_subdirectory(src)cmake_set自动调用moc,uic,rcc处理qt的扩展部分set(CMAKE_AUTOMOC ON)_qt专用
# Automatically run moc, uic and rcc on the Qt-specific parts
# 1. signal/slot support (moc)
set(CMAKE_AUTOMOC ON)
# uic
set(CMAKE_AUTOUIC ON)
# rcc
set(CMAKE_AUTORCC ON)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
cmake_include_directories指定包含的头文件目录include_directories(../driver)
# 指定包含的头文件目录 ../表示在当前文件的上一层目录中有个driver文件夹 include_directories(../driver)cmake_add_executable_5_生成可执行程序add_executable(test main.cpp MainWindow.cpp)
add_executable(test main.cpp MainWindow.cpp)cmake_target_link_libraries_6_链接库(生成的库文件add_library)_(62)_target_link_libraries( test driver )
#生成可执行程序 add_executable(test main.cpp MainWindow.cpp) # 指定上一级目录 指定包含的头文件目录 /driver这个是文件目录 include_directories(../driver) #连接库文件 driver 是生成的库文件add_library(driver SHARED URDriver.cpp URScript.cpp),或可执行程序 target_link_libraries( test driver )cmake_add_library生成一个库文件_(61)_add_library(driver SHARED URDriver.cpp URScript.cpp)
#生成一个库文件 add_library(driver SHARED URDriver.cpp URScript.cpp) #连接库文件 target_link_libraries( driver ${QT_LIBRARIES} )cmake_target_link_libraries_6_链接库target_link_libraries( driver${QT_LIBRARIES})_单
#连接库文件 target_link_libraries( driver ${QT_LIBRARIES} )cmake_include_directories指定上一级目录include_directories(../driver)
# 指定上一级目录 指定包含的头文件目录 ../表示从当前文件的上一层文件开始查找, /driver这个是文件目录 include_directories(../driver) #引用上一级目录中的可执行程序add_executable或库文件add_library #target_link_libraries( # test # 引用上一级目录中的可执行程序add_executable或库文件add_library #)cmake_find_library查找库find_library(stu_lib333 stu ${stu_dir})
# 找学生 find_library(stu_lib333 stu ${stu_dir}) #find_library(teacher_lib333 teacher ${teacher_dir}) #message(333=${stu_lib333}) #message(666=${teacher_lib333}) # 2. 关联库 #target_link_libraries(day13_07 ${stu_lib333} ${teacher_lib333})cmake_find_library_foreach循环查找多个库foreach(name ${component_names})
# 1. 先用一个变量来存储我们要找的dll的名字。 同时查找 stu teacher 两个库,在这 ${stu_dir}/lib ${teacher_dir}/lib两个文件夹下查找 set(component_names stu teacher) foreach(name ${component_names}) message(name=${name}) # 第一次循环的时候,去找学生,找到之后赋值给这个变量heima_lib_stu # 第二次循环的时候,去找教师,找到之后赋值给这个变量heima_lib_teacher find_library(heima_lib_${name} ${name} ${stu_dir}/lib ${teacher_dir}/lib) message(lib7777=${heima_lib_${name}}) # 把这些找到的变量,都增量的赋值给一个全新的变量 set(heima_lib ${heima_lib} ${heima_lib_${name}}) endforeach() message(heima_lib=${heima_lib}) # 2. 关联库 target_link_libraries(day13_08 ${heima_lib})cmake_find_package查找路径find_package(itheima)_set(CMAKE_MODULE_PATH C:/itheima)设置查找路径
# 设置模块的路径,好方便find_package 去查找这个库。一般在这个路径下会有一个文件 Finditheima.cmake | itheimaConfig.cmake set(CMAKE_MODULE_PATH C:/itheima) # 查找itheima库 find_package(itheima) #find_package(OpenCV REQUIRED) #add_executable(day13_09 main.cpp) # 2. 关联库 #target_link_libraries(day13_09 ${heima_lib})cmake_set_CMAKE_MODULE_PATH设置查找项目中的cmake文件目录set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/cmake) #find_package(OpenCV REQUIRED)cmake_set_CMAKE_PREFIX_PATH 设置查找某个库的路径set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/qt)
# 设置find__xxx的路径 找qt环境的目录 set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/qt)cmake_find_package_查找路径_rEQUIREDfind_package(OpenCV REQUIRED)
find_package(OpenCV REQUIRED)cpp_dl_class_private构造方法私有化_无参构造_拷贝构造_赋值构造
#include <memory> private: //私有三个构造函数 URDriver();//无参构造 URDriver(const URDriver&);//拷贝构造 URDriver operator=(const URDriver&);//赋值构造cpp_dl_class_单例模式
#include <memory>

// outside the class: initialise the single instance
shared_ptr<URDriver> URDriver::instance = shared_ptr<URDriver>(new URDriver);

// inside the class
private:
    // make all three constructors private
    URDriver();                          // default constructor
    URDriver(const URDriver&);           // copy constructor
    URDriver operator=(const URDriver&); // copy assignment
    // static instance
    static shared_ptr<URDriver> instance;
public:
    // static accessor returning the single instance
    static shared_ptr<URDriver> getInstance() {
        return instance;
    }
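Callers then use URDriver::getInstance()->... everywhere. As a side note, the shared_ptr instance above is constructed eagerly before main(); a common alternative (a sketch, not what the original driver does) is the lazy, C++11-thread-safe Meyers singleton:

class URDriver {
public:
    // constructed on first use; thread-safe since C++11
    static URDriver& getInstance() {
        static URDriver instance;
        return instance;
    }
    URDriver(const URDriver&) = delete;             // no copies
    URDriver& operator=(const URDriver&) = delete;  // no assignment
private:
    URDriver() = default;                           // only getInstance() can construct
};

cpp_shared_ptr智能指针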
#include <memory> shared_ptr<URDriver> URDriver::instance = shared_ptr<URDriver>(new URDriver);cpp_pcl生成正方体_圆形
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>); pcl::PointCloud<pcl::PointXYZ>::Ptr inliers_cloud(new pcl::PointCloud<pcl::PointXYZ>); // 根据参数生成点云数据 cloud->width = 500; cloud->height = 1; cloud->points.resize(cloud->width * cloud->height); bool isSphere = pcl::console::find_argument(argc, argv, "-s") >= 0 || pcl::console::find_argument(argc, argv, "-sf") >= 0; for (int i = 0; i < cloud->points.size(); ++i) { cloud->points[i].x = 1024 * rand() / (RAND_MAX + 1.0f); cloud->points[i].y = 1024 * rand() / (RAND_MAX + 1.0f); if (isSphere) { // -s 生成包含外部点球体 if (i % 5 == 0) { cloud->points[i].z = 1024 * rand() / (RAND_MAX + 1.0f); } else if (i % 2 == 0) { cloud->points[i].z = sqrt(1 - (pow(cloud->points[i].x, 2) + pow(cloud->points[i].y, 2))); } else { cloud->points[i].z = -sqrt(1 - (pow(cloud->points[i].x, 2) + pow(cloud->points[i].y, 2))); } }else { // 生成包含外部点平面 -1.0 -> 1.0 if (i % 2 == 0) { cloud->points[i].z = 1024 * rand() / (RAND_MAX + 1.0f); }else { cloud->points[i].z = -1.0f * (cloud->points[i].x + cloud->points[i].y); } } }cpp_pcl带箭头输出日志 pcl::console::print_highlight ("Starting alignment...\n");
pcl::console::print_highlight ("Starting alignment...\n");cp_function<void()>用函数作为参数传参—函数指针
#include <functional>
using std::function;

class URDriver {
private:
    // connection callback
    function<void()> connectCallBack;
public:
    // set the callback
    void setConnectCallBack(function<void()> connectCallBack) {
        this->connectCallBack = connectCallBack;
    }
    // invoke the callback
    void runConnectCallBack() {
        this->connectCallBack();
    }
};

// Usage: from another class, pass a lambda capturing [this] into URDriver's
// function<void()> connectCallBack member.
void MainWindow::setCallBack() {
    // connected callback
    URDriver::getInstance()->setConnectCallBack([this] {
        // update the UI state
        statusLabel->setText("已连接");
    });
    // disconnected callback
    URDriver::getInstance()->setDisConnectCallBack([this] {
        // update the UI state
        statusLabel->setText("未连接");
    });
}
cpp_qt把数字转换成QString _msg= QString::number(15.2)_s数据类型转换;
QString msg= QString::number(15.2);cpp_qt_QString 转QbyteArray
//1.拼接发送的指令字符串 QString msg = script.loadMovelScript(pose,a,v); //2.通过socket发送拼接的字符串 socket.write(msg.toUtf8());cpp_opencv_converTo_s数据类型转换cv_8u;
cv::Mat result;
// convert to the 8-bit unsigned type (CV_8U)
result.convertTo(result, CV_8U);
cpp_opencv_normalize归一化到0和1之间
cv::Mat result;
// dis: input, result: output; 0, 1: normalize into the range [0, 1]
cv::normalize(dis, result, 0, 1, cv::NORM_MINMAX);
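A typical combination of normalize and convertTo when visualising a floating-point map (a sketch; dis stands for any single-channel CV_32F image such as a disparity or distance map):

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>

cv::Mat dis;        // assume: a single-channel CV_32F map (e.g. disparity)
cv::Mat shown, saved;
cv::normalize(dis, shown, 0, 1, cv::NORM_MINMAX);     // [0,1] floats can be shown directly with imshow
cv::normalize(dis, saved, 0, 255, cv::NORM_MINMAX);   // [0,255] then 8-bit, e.g. for imwrite
saved.convertTo(saved, CV_8U);
cv::imshow("result", shown);
cv::waitKey(0);

cpp_memcpy_数组拷贝赋值memcpy(&urData.MsgSize, pdata, 4)_ char *p = pdata;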
#include <memory.h> //参考官网下载的clinet_interface_v3.11 .xlsx 文件 ,用这个文件看读取机器臂返回数据是如何解析的,. 本机器臂版本3.9,对应端口30003 ,找文件中realtime 3.7-3.9那一页数据 其中在41行 显示数据长度1108个字节,可以打印出来看看是否一致 qDebug()<<data.size()<<endl //最终139个字段, 1108个字节解析成139个字段,用结构体 data.h保存 QObject::connect(&socket, &QTcpSocket::readyRead, [this] { //读取数据 QByteArray data = socket.readAll(); URData urData; //data.h结构体用来保存数据 //解析数据 \x指的是16进制数据 0xff为最大值 0-255 1byte=8bit 2^8 256 parseData(data, urData); //决定是否执行下一条指令 decideExcuteNextInstruction(urData); }); // 网络传输是大端模式 //解析ur返回的数据 URData urData; data.h结构体用来保存数据 void URDriver::parseData(QByteArray &data, URData &urData) { //先解析第一个数据MsgSize //拷贝前四个字节 到URData结构体 // char *pdata = data.data(); char pdata[1108]; //把data所有的数据拷贝 memcpy(pdata, data.data(), data.size()); /*-------------------------- 定义指针保存首地址 --------------------------*/ char *p = pdata; /*-------------------------- 拷贝前4个字节 --------------------------*/ //交换前四个字节 reverseByte(p, 4); //拷贝前四个字节 memcpy(&urData.MsgSize, pdata, 4); /*-------------------------- 交换剩下的数据 --------------------------*/ for (char *q = p + 4; q < p + 1108 - 8; q += 8) { //交换这8个数据 reverseByte(q, 8); } //拷贝剩下的数据 memcpy(((char *) &urData) + 8, p + 4, data.size() - 4); } void URDriver::addInstruction(MOVETYPE movetype, double *data, double a, double v) { //构建指令结构体 Instruction instruction; instruction.movetype = movetype; //拷贝data后面6个数据到指令的data中 注意:后面的参数是字节的意思 6 个double数据 memcpy(instruction.data, data, 6 * sizeof(double)); instruction.a = a; instruction.v = v; //添加到队列中 instructionQueue.push(instruction); }URData
// // Created by wt on 2020/6/4. // #ifndef URDRIVERCPP_DATA_H #define URDRIVERCPP_DATA_H //连接上机械臂,机械臂返回的数据 struct URData{ int MsgSize;//121 double Time; double q_target[6]; double qd_target[6]; double qdd_target[6]; double I_target[6]; double M_target[6]; //当前关节角度 double q_actual[6];//关节角度 double qd_actual[6]; double I_actual[6]; double I_control[6]; //当前工具位置 double Tool_vector_actual[6]; //位置+姿态 double TCP_speed_actual[6]; double TCP_force[6]; double Tool_vector_target[6]; double TCP_speed_target[6]; double Digital_input_bits; double Motor_temperatures[6]; double Controller_Timer; double Test_value; double Robot_Mode; double Joint_Modes[6]; double Safety_Mode; double Buffer1[6]; double Tool_Accelerometer_values[3]; double Buffer2[6]; double Speed_scaling; double Linear_momentum_norm; double Buffer3; double Buffer4; double V_main; double V_robot; double I_robot; double V_actual[6]; double Digital_outputs; //机械臂状态:1 空闲 2 工作 double Program_state; double Elbow_position[3]; double Elbow_velocity[3]; }; //枚举 enum MOVETYPE{ MOVEJ, MOVEL }; //指令结构体 struct Instruction{ MOVETYPE movetype; double data[6]; double a; double v; }; #endif //URDRIVERCPP_DATA_H
cpp_reverseByte大小端数据转换工具
#include "utils.h" #include <math.h> #include <memory.h> void reverseByte(char *p, int size) { if (size == 4) { //0001 char tmp = p[0];//*p p[0] = p[3];//*(p+3) p[3] = tmp; tmp = p[1]; p[1] = p[2]; p[2] = tmp; } else if (size == 8) { //10000000 char tmp = p[0]; p[0] = p[7]; p[7] = tmp; tmp = p[1]; p[1] = p[6]; p[6] = tmp; tmp = p[2]; p[2] = p[5]; p[5] = tmp; tmp = p[3]; p[3] = p[4]; p[4] = tmp; } } /*-------------------------- 定义指针保存首地址 --------------------------*/ char *p = pdata; /*-------------------------- 拷贝前4个字节 --------------------------*/ //交换前四个字节 网络数据存在大小端模式,必须转换reverseByte reverseByte(p, 4); //拷贝前四个字节 memcpy(&urData.MsgSize, pdata, 4); /*-------------------------- 交换剩下的数据 --------------------------*/ for (char *q = p + 4; q < p + 1108 - 8; q += 8) { //交换这8个数据 reverseByte(q, 8); } //拷贝剩下的数据 memcpy(((char *) &urData) + 8, p + 4, data.size() - 4);cpp_memcpy_数组拷贝赋值_简
#include <memory.h> // urData结构体中的int数据 MsgSize四个字节; 把4个字节的数据拷贝到int类型的MsgSize中; pdata的char数组中有很多数据 memcpy(&urData.MsgSize, pdata, 4);cpp_判断当前处理器cpu的数据类型存储方式是小端还是大端
#include <iostream>
using namespace std;

// Check whether the current CPU stores multi-byte values little-endian or big-endian
int main(int argc, char *argv[]) {
    int a = 1;
    char *pc = (char *)&a;   // look at the byte stored at the lowest address
    if (*pc == 1) {
        cout << "小端" << endl;   // little-endian: low byte stored first
    } else {
        cout << "大端" << endl;   // big-endian
    }
    return 0;
}
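Because the UR data stream arrives in network (big-endian) byte order, the standard POSIX helpers can replace hand-written swapping for 32-bit fields; a sketch (parseBigEndian32 is a name introduced here, assuming a Linux/POSIX build):

#include <arpa/inet.h>   // ntohl (POSIX)
#include <cstdint>
#include <cstring>

// Read a big-endian 32-bit value (e.g. the MsgSize field) from a raw byte buffer.
uint32_t parseBigEndian32(const char *p) {
    uint32_t v;
    memcpy(&v, p, sizeof(v));   // copy the 4 raw bytes
    return ntohl(v);            // network byte order -> host byte order (a no-op on big-endian CPUs)
}

cpp_char *q = p + 4通过移动地址位数来遍历数据for (char *q = p + 4; q < p + 1108 - 8; q += 8)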
char pdata[1108]; //把data所有的数据拷贝 memcpy(pdata, data.data(), data.size()); /*-------------------------- 定义指针保存首地址 --------------------------*/ char *p = pdata; /*-------------------------- 拷贝前4个字节 --------------------------*/ //交换前四个字节 reverseByte(p, 4); //拷贝前四个字节 memcpy(&urData.MsgSize, pdata, 4); /*-------------------------- 交换剩下的数据 p是地址 p+4的意思是从p的地址开始往后移动4位,,p + 1108 - 8,从p的地址开始往后移动1108-8位 --------------------------*/ for (char *q = p + 4; q < p + 1108 - 8; q += 8) { //交换这8个数据 reverseByte(q, 8); }cpp_ros_robot_getToolMat获取工具坐标
// ③ 3. Build the tool pose as a 4x4 homogeneous transform (translation in metres, hence / 1000)
Mat getToolMat() {
    double tool_x = 0, tool_y = 0, tool_z = 220;   // tool offset in mm
    Mat_<double> toolMat = (Mat_<double>(4, 4) <<
            1, 0, 0, tool_x / 1000,
            0, 1, 0, tool_y / 1000,
            0, 0, 1, tool_z / 1000,
            0, 0, 0, 1);
    return toolMat;
}

// Inverse of the 4x4 homogeneous tool pose
// VisionGrab\src\util\Rotation3DUtils.h : homogeneousInverse(toolMat) computes the inverse of a homogeneous matrix
// const Mat &toolMatInv = homogeneousInverse(toolMat);
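homogeneousInverse itself is not listed in this post; for a rigid 4x4 transform [R t; 0 1] the inverse is [Rᵀ -Rᵀt; 0 1], so a plausible sketch (the project's own Rotation3DUtils.h may differ) is:

#include <opencv2/core.hpp>

cv::Mat homogeneousInverse(const cv::Mat &T) {
    CV_Assert(T.rows == 4 && T.cols == 4);
    cv::Mat R = T(cv::Rect(0, 0, 3, 3));        // top-left 3x3 rotation
    cv::Mat t = T(cv::Rect(3, 0, 1, 3));        // top-right 3x1 translation
    cv::Mat Tinv = cv::Mat::eye(4, 4, T.type());
    cv::Mat Rt = R.t();
    Rt.copyTo(Tinv(cv::Rect(0, 0, 3, 3)));      // R^T
    cv::Mat tinv = -Rt * t;
    tinv.copyTo(Tinv(cv::Rect(3, 0, 1, 3)));    // -R^T * t
    return Tinv;
}

cmake_qt_ui_cmake中导入qt—windows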
#1. 下载qtcreate #2. 安装qtcreate 沟选第一条qt中的 Mingw, 选择安装目录C:/Qt/Qt5.14.0 #> 需要登录,且安装选择有mingw 支持。 #3. clion 里面的mingw 需要切换成qt中的tool里面的mingw #4. 需要在环境变量中配置 tool & lib的路径 cmake_minimum_required(VERSION 3.15) project(qtdemo) set(CMAKE_CXX_STANDARD 14) set(CMAKE_AUTOMOC ON) # #设置目标关联的*.h, *.cpp 使用 Qt moc进行编译 set(CMAKE_AUTORCC ON) # #设置目标关联的*.ui 使用 Qt uic进行编译 set(CMAKE_AUTOUIC ON) # automoc能根据*.h,*.cpp代码里面的宏(Q_OBJECT;Q_GADGET;Q_NAMESPACE)自动判断是否需要使用moc # 设置查找的路径 set(CMAKE_PREFIX_PATH "C:/Qt/Qt5.14.0/5.14.0/mingw73_64/lib/cmake") # 设置输出的路径 ;后加的,试试是否有问题 SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build/bin" CACHE PATH "Runtime directory" FORCE) SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build/libs" CACHE PATH "Library directory" FORCE) SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build/libs" CACHE PATH "Archive directory" FORCE) # 查找qt5这个包, 这个包下有很多组件,都可以直接在后面跟。 # 不能直接写 find_package(Qt5) find_package(Qt5 COMPONENTS Core Gui Widgets ) add_executable(qtdemo main.cpp) # 关联动态库, 这里之关联了widgets 其实还可以关联更多。 target_link_libraries(qtdemo Qt5::Widgets)cpp_qt_ui_main显示窗口
#include <iostream> #include <QApplication> #include <QWidget> int main(int argc , char **argv) { QApplication qa(argc , argv); //创建窗口 QWidget w; //显示窗口 w.show(); std::cout << "Hello, World!" << std::endl; return qa.exec(); }cpp_qt_ui_myWindow窗口类class
class MyWindow : public QWidget{ public: MyWindow(){ cout << "执行了窗口的构造" << endl; //1. 创建按钮 // QPushButton *btn = new QPushButton(); //设置它的父亲是 这个窗口,也就是表示,它是窗口的一份子。 表示父窗口是谁。 //btn->setParent(this); //2. 创建按钮 通过构造创建对象 参数一: 内容名称, 参数二:表示父窗口是谁。 QPushButton *btn = new QPushButton("button" ,this); //设置窗口图标 setWindowIcon(QIcon("../nlm_icon.jpg")); //设置窗口标题 setWindowTitle(QString::fromLocal8Bit("中文")); //设置大小 允许拖拽 resize(400,300); } ~MyWindow(){ cout << "执行了窗口的析构" << endl; } }; //程序显示 int main(int argc , char **argv) { QApplication qa(argc , argv); //创建窗口 MyWindow w; //显示窗口 w.show(); std::cout << "Hello, World!" << std::endl; return qa.exec(); }cpp_qt_ui按钮 QPushButton
//记住一个核心: 所有的空间应该都依附于窗口里面。不要直接独立显示。 //参数一: 内容, 参数二:表示父窗口是谁。 QPushButton *btn = new QPushButton("button" ,this);cpp_qt_ui文本 QLabel显示中文需要转化
//参数一: 内容-显示中文,需要转化, 参数二:表示父窗口是谁。 QLabel *q = new QLabel(QString::fromLocal8Bit("姓名: ") ,this );cpp_qt_ui_QLineEdit输入框
QLineEdit *edit = new QLineEdit( this); edit->setPlaceholderText(QString::fromLocal8Bit("请输入姓名"));cpp_qt_ui最大和最小的大小值,可以使用鼠标拖拽
//表示最大和最小的大小值,可以使用鼠标拖拽 setMaximumSize(600,500); setMinimumSize(400,300); //设置固定大小 : 禁止放大和拖拽 setFixedSize(100,50); //设置位置 从左往右: x坐标, y的坐标 宽度和高度 setGeometry(50 ,50 , 40,20);cpp_qt_ui所有布局
//在Qt里面布局分为四个大类 :QBoxLayout | QFormLayout | QGridLayout | QStackedLayout //QBoxLayout : 直译为:盒子布局,快速构建的话,一般使用它的两个子类QHBoxLayout 和 QVBoxLayout //负责水平和垂直布局 //QGridLayout : 网格布局,有的人称之为九宫格布局 //QFormLayout: 一般适用于提交数据form表单。比如: 登录,注册类似的场景 //QStackedLayout : 提供了多页面切换的布局,一次只能看到一个界面。cpp_qt_ui显示中文需要转化 QString::fromLocal8Bit("抽烟")
QString::fromLocal8Bit("抽烟")cpp_qt_ui_myWindow_参考windows
MyWindow(){ cout << "执行了窗口的构造" << endl; //创建垂直布局 QVBoxLayout *layout = new QVBoxLayout; //第一组单选组 QGroupBox *box1 = new QGroupBox; //使用垂直布局包装 QVBoxLayout *layout1 = new QVBoxLayout; QRadioButton *btn1 = new QRadioButton(QString::fromLocal8Bit("抽烟")); QRadioButton *btn2 = new QRadioButton(QString::fromLocal8Bit("喝酒")); QRadioButton *btn3 = new QRadioButton(QString::fromLocal8Bit("烫头")); layout1->addWidget(btn1); layout1->addWidget(btn2); layout1->addWidget(btn3); //把包装好的选项,放到groupBox中。 box1->setLayout(layout1); //第二个单选组 QGroupBox *box2 = new QGroupBox(QString::fromLocal8Bit("性别: ")); QHBoxLayout *layout2 = new QHBoxLayout; QRadioButton *btn4 = new QRadioButton(QString::fromLocal8Bit("男")); QRadioButton *btn5 = new QRadioButton(QString::fromLocal8Bit("女")); layout2->addWidget(btn4); layout2->addWidget(btn5); //把包装好的选项,放到groupBox中。 box2->setLayout(layout2); //把两组放到整体布局中 layout->addWidget(box1); layout->addWidget(box2); //设置这个窗口的布局 setLayout(layout); }cpp_qt_ui_paintEvent_绘画事件 =============2020-12-03
//绘制直线 class QLineWnd : public QWidget{ void paintEvent(QPaintEvent *event){ QPainter * painter = new QPainter(this); QPen *p = new QPen(QColor(Qt::GlobalColor::red)); painter->setPen(*p); painter->drawLine(100,100,200,200); } }; //绘制文字 //绘制事件 void MainWindow::paintEvent(QPaintEvent *event){ //创建画家 QPainter *painter = new QPainter(this); //画笔 QPen *p = new QPen(QColor(Qt::GlobalColor::red)); //替换画笔 painter->setPen(*p); //绘制文字 painter->drawText(300,300,"中国"); } //绘制矩形 //绘制事件 void MainWindow::paintEvent(QPaintEvent *event){ //创建画家 QPainter *painter = new QPainter(this); //画笔 QPen *p = new QPen(QColor(Qt::GlobalColor::red)); //替换画笔 painter->setPen(*p); //绘制矩形 painter->drawRect(50,50,200,200) } //绘制圆形 //绘制事件 void MainWindow::paintEvent(QPaintEvent *event){ //创建画家 QPainter *painter = new QPainter(this); //画笔 QPen *p = new QPen(QColor(Qt::GlobalColor::red)); //替换画笔 painter->setPen(*p); //绘制圆形 painter->drawEllipse(200,200,100,100); } //绘制多边形 //绘制事件 void MainWindow::paintEvent(QPaintEvent *event){ //创建画家 QPainter *painter = new QPainter(this); //画笔 QPen *p = new QPen(QColor(Qt::GlobalColor::red)); //替换画笔 painter->setPen(*p); QPoint point1(100,100); QPoint point2(100,150); QPoint point3(150,100); QPoint point4(150,200); QPoint point5(100,100); QPoint arr[] = {point1,point2,point3,point4,point5}; //绘制多边形 painter->drawConvexPolygon(arr,4); }
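In the snippets above the QPainter and QPen objects are allocated with new on every repaint and never deleted; the usual Qt pattern is to create the painter on the stack so it is released automatically when the handler returns. A compact sketch combining the shapes above (MainWindow as in the original):

#include <QPainter>
#include <QPen>

void MainWindow::paintEvent(QPaintEvent *event) {
    Q_UNUSED(event);
    QPainter painter(this);                   // stack object, destroyed at the end of the scope
    painter.setPen(QPen(Qt::red));            // red pen
    painter.drawLine(100, 100, 200, 200);     // line
    painter.drawRect(50, 50, 200, 200);       // rectangle
    painter.drawEllipse(200, 200, 100, 100);  // ellipse
    painter.drawText(300, 300, QStringLiteral("text"));
}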
