CRAIC-2024 Target Shooting: Practice Notes
Hi everyone! Today I'd like to walk through my practice process for the 2024 CRAIC Challenge / Robot Mission Challenge (Target Shooting)~
CRAIC Target Shooting Practice
1. Competition overview
- The China Robotics and Artificial Intelligence Competition (CRAIC) is a long-running, widely influential national discipline competition that has trained a large number of hands-on, innovative, and collaborative interdisciplinary engineers. It is listed in the national university discipline competition rankings and the national undergraduate competition analysis report published by the China Association of Higher Education. The event aims to encourage students to learn, innovate, and create on their own initiative, to spark enthusiasm for exploring and applying new technologies, and to advance the training of professionals in related fields.
2. Competition tasks
- (1) Reach goal point 1 (10 pts)
- (2) Hit the ring target (ring value hit × 2) (20 pts)
- (3) Reach goal point 2 (10 pts)
- (4) Knock down the target in front of point 2 (20 pts)
- (5) Reach goal point 3 (10 pts)
- (6) Knock down the mission target in front of point 3 (20 pts)
- (7) Reach the finish area (10 pts)
- (8) Competition documentation (10 pts)
- Note: a match ends as soon as any of the following occurs:
  - 1. A team member raises a hand to signal the end of the match.
  - 2. The robot has completely entered the "finish" area.
  - 3. The robot touches the barrier during the match.
  - 4. The robot has not started moving within 30 s of the referee announcing the start.
  - 5. A team member enters the field while the robot is running.
  - 6. The referees decide to end the match based on the robot's state
    (for example, the program has frozen, or the robot's state has not changed for more than 20 s).
  - In all of the above cases, the recorded result is the score and time at the moment the match ends.
- Note: each match lasts at most 2 minutes. Timing starts when the referee announces the start of the match; when the 2 minutes are up the match ends, and only the score achieved within those 2 minutes is recorded.
2.1 Practice process
Code
1 AR tag recognition
The AR tag (QR marker) recognition script lives at abot_ws/src/robot_slam/scripts/ar_demo_s.py:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from ar_track_alvar_msgs.msg import AlvarMarkers


class ARTracker:
    def __init__(self):
        # Initialise the ROS node 'ar_tracker_node' as an anonymous node
        rospy.init_node('ar_tracker_node', anonymous=True)
        # Subscribe to AR marker messages (AlvarMarkers); ar_cb is the callback
        self.sub = rospy.Subscriber('/ar_pose_marker', AlvarMarkers, self.ar_cb)
        # Latest x/y position of the tracked AR marker
        self.ar_x_0 = 0.0
        self.ar_y_0 = 0.0
        # ID of the tracked AR marker
        self.id = None

    # Callback for AR marker messages
    def ar_cb(self, data):
        # Iterate over all detected AR markers
        for marker in data.markers:
            # Only handle the marker with ID 0
            if marker.id == 0:
                # Update the marker position
                self.ar_x_0 = marker.pose.pose.position.x
                self.ar_y_0 = marker.pose.pose.position.y
                # Update the marker ID
                self.id = marker.id
                # Print the detected marker ID and position
                print('Detected AR Marker ID:', self.id)
                print('AR Marker Position (x,y):', self.ar_x_0, self.ar_y_0)


if __name__ == '__main__':
    try:
        # Create the ARTracker object
        ar_tracker = ARTracker()
        # Enter the ROS event loop
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
```
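To try one of these demo scripts on its own (with the camera and AR-tracking launch files from the startup script described below already running), source the workspace, make the script executable, and start it with rosrun:

```bash
source ~/abot_ws/devel/setup.bash
chmod +x ~/abot_ws/src/robot_slam/scripts/ar_demo_s.py
rosrun robot_slam ar_demo_s.py
```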

2 AR tag shooting
The AR tag shooting script lives at abot_ws/src/robot_slam/scripts/ar_shoot_demo.py:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from ar_track_alvar_msgs.msg import AlvarMarkers
from geometry_msgs.msg import Twist

# Yaw error threshold: below this the robot is considered aligned
Yaw_th = 0.0045


class ARTracker:
    def __init__(self):
        # Initialise the ROS node 'ar_tracker_node' as an anonymous node
        rospy.init_node('ar_tracker_node', anonymous=True)
        # Subscribe to AR marker messages (AlvarMarkers); ar_cb is the callback
        self.sub = rospy.Subscriber('/ar_pose_marker', AlvarMarkers, self.ar_cb)
        # Publisher for Twist velocity commands on /cmd_vel
        self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1000)

    # Callback for AR marker messages
    def ar_cb(self, data):
        # Iterate over all detected AR markers
        for marker in data.markers:
            # Only handle the marker with ID 0
            if marker.id == 0:
                # Lateral offset of the marker relative to the camera
                ar_x = marker.pose.pose.position.x
                ar_x_abs = abs(ar_x)
                if ar_x_abs >= Yaw_th:
                    # Not yet aligned: rotate towards the marker, angular velocity
                    # proportional to the offset with gain -1.5
                    msg = Twist()
                    msg.angular.z = -1.5 * ar_x
                    self.pub.publish(msg)
                else:
                    # Aligned within the threshold
                    print('ok')


if __name__ == '__main__':
    try:
        # Create the ARTracker object
        ar_tracker = ARTracker()
        # Enter the ROS event loop
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
```

3 Image recognition
The image recognition script lives at abot_ws/src/robot_slam/scripts/find_demo_s.py:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from geometry_msgs.msg import Point


class object_position:
    def __init__(self):
        # Initialise the ROS node 'object_position_node' as an anonymous node
        rospy.init_node('object_position_node', anonymous=True)
        # Subscribe to /object_position (Point messages); find_cb is the callback
        self.find_sub = rospy.Subscriber('/object_position', Point, self.find_cb)

    # Callback for /object_position
    def find_cb(self, data):
        point_msg = data
        # Print the received point coordinates
        print('x:', point_msg.x)
        print('y:', point_msg.y)
        print('z:', point_msg.z)


if __name__ == '__main__':
    try:
        # Create the object_position node
        node = object_position()
        # Enter the ROS event loop
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
```

4 Image-recognition shooting
The image-recognition shooting script lives at abot_ws/src/robot_slam/scripts/find_shoot_demo.py:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from geometry_msgs.msg import Point, Twist
import serial
import time

# Serial port and baud rate of the shooting module
serialPort = "/dev/shoot"
baudRate = 9600
# Open the serial port
ser = serial.Serial(port=serialPort, baudrate=baudRate, parity="N", bytesize=8, stopbits=1)


class object_position:
    def __init__(self):
        # Initialise the ROS node 'object_position_node' as an anonymous node
        rospy.init_node('object_position_node', anonymous=True)
        # Subscribe to /object_position (Point messages); find_cb is the callback
        self.find_sub = rospy.Subscriber('/object_position', Point, self.find_cb)
        # Publisher for Twist velocity commands on /cmd_vel
        self.pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1000)

    # Callback for /object_position
    def find_cb(self, data):
        global flog0, flog1
        point_msg = data
        # Horizontal offset of the target from the image centre column (x = 320)
        flog0 = point_msg.x - 320
        # Absolute value of the offset
        flog1 = abs(flog0)
        if flog1 > 0.5:
            # Not yet aligned: rotate towards the target, angular velocity
            # proportional to the pixel offset with gain 0.01
            msg = Twist()
            msg.angular.z = -0.01 * flog0
            self.pub.publish(msg)
        else:
            # Aligned: send the shoot command
            ser.write(b'\x55\x01\x12\x00\x00\x00\x01\x69')
            print('shoot')
            # Wait about 0.08 s
            time.sleep(0.08)
            # Send the stop-shooting command
            ser.write(b'\x55\x01\x11\x00\x00\x00\x01\x68')


if __name__ == '__main__':
    try:
        # Create the object_position node
        node = object_position()
        # Enter the ROS event loop
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
```

5 Robot movement: method 1
The robot moves continuously in a loop; the script lives at abot_ws/src/robot_slam/scripts/move_demo.py:
```python
#!/usr/bin/env python
#coding: utf-8
import rospy
from geometry_msgs.msg import Twist


# Drive the robot with a constant linear and angular velocity
def move_robot(linear_x, angular_z):
    # Initialise the ROS node 'move_robot_node' as an anonymous node
    rospy.init_node('move_robot_node', anonymous=True)
    # Publisher for Twist velocity commands on /cmd_vel
    velocity_publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
    # Publish at 10 Hz
    rate = rospy.Rate(10)
    # Build the Twist message with the requested velocities
    vel_msg = Twist()
    vel_msg.linear.x = linear_x
    vel_msg.angular.z = angular_z
    # Keep publishing until the node is shut down
    while not rospy.is_shutdown():
        velocity_publisher.publish(vel_msg)
        rate.sleep()


if __name__ == '__main__':
    try:
        # Linear and angular velocities
        linear_x = 0.2   # m/s
        angular_z = 0.5  # rad/s
        # Move the robot
        move_robot(linear_x, angular_z)
    except rospy.ROSInterruptException:
        pass
```

6 Robot movement: method 2
The robot moves for a specified duration; the script lives at abot_ws/src/robot_slam/scripts/move_robot.py:
```python
#!/usr/bin/env python
#coding: utf-8
import rospy
from geometry_msgs.msg import Twist


class move_robot:
    def __init__(self):
        # Initialise the ROS node 'move_robot_node' as an anonymous node
        rospy.init_node('move_robot_node', anonymous=True)
        # Publisher for Twist velocity commands on /cmd_vel
        self.pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1000)

    # Drive the robot forward for a fixed duration
    def move_cb(self):
        # Loop counter
        count = 0
        # Build the Twist message: 1.0 m/s forward, no rotation
        msg = Twist()
        msg.linear.x = 1.0
        msg.linear.y = 0.0
        msg.linear.z = 0.0
        msg.angular.x = 0.0
        msg.angular.y = 0.0
        msg.angular.z = 0.0
        # Publish 10 times at 0.1 s intervals, i.e. move for about 1 second
        while count < 10:
            self.pub.publish(msg)
            rospy.sleep(0.1)
            count += 1


if __name__ == '__main__':
    try:
        # Create the move_robot object
        move = move_robot()
        # Move the robot
        move.move_cb()
    except rospy.ROSInterruptException:
        pass
```

7 Shooting
A standalone shooting script, at abot_ws/src/robot_slam/scripts/shoot_demo.py:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import serial
import time

# Serial port and baud rate of the shooting module
serialPort = "/dev/shoot"
baudRate = 9600
# Open the serial port
ser = serial.Serial(port=serialPort, baudrate=baudRate, parity="N", bytesize=8, stopbits=1)

if __name__ == '__main__':
    try:
        # Initialise a ROS node so rospy.spin() can keep the script alive
        rospy.init_node('shoot_demo_node', anonymous=True)
        # Send the shoot command
        ser.write(b'\x55\x01\x12\x00\x00\x00\x01\x69')
        print('shoot')
        # Wait about 0.08 s
        time.sleep(0.08)
        # Send the stop-shooting command
        ser.write(b'\x55\x01\x11\x00\x00\x00\x01\x68')
        # Keep the node running
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
```
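Looking at the two frames above, the shoot (0x12) and stop (0x11) commands appear to share a fixed 8-byte layout whose last byte equals the low byte of the sum of the first seven bytes (0x55 + 0x01 + 0x12 + 0x01 = 0x69, and 0x55 + 0x01 + 0x11 + 0x01 = 0x68). This is only inferred from these two example frames rather than from any documentation, but a small helper like the hypothetical build_frame below makes it easier to experiment:

```python
def build_frame(command, payload=(0x00, 0x00, 0x00, 0x01)):
    """Build an 8-byte frame: fixed 0x55 0x01 header, a command byte,
    four payload bytes, and a trailing byte equal to the sum of the first
    seven bytes masked to 8 bits. Layout inferred from the example frames."""
    frame = [0x55, 0x01, command] + list(payload)
    frame.append(sum(frame) & 0xFF)
    return bytes(bytearray(frame))

# build_frame(0x12) == b'\x55\x01\x12\x00\x00\x00\x01\x69'  # shoot
# build_frame(0x11) == b'\x55\x01\x11\x00\x00\x00\x01\x68'  # stop
```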

8 Navigation files
There are several navigation scripts from the autonomous-cruise track that we can borrow from; they live in abot_ws/src/robot_slam/scripts/:
- transform_utils.py
```python
#!/usr/bin/env python
""" A couple of handy conversion utilities taken from the turtlebot_node.py script found in the
    turtlebot_node ROS package at:
    http://www.ros.org/wiki/turtlebot_node
"""
import PyKDL
from math import pi


def quat_to_angle(quat):
    rot = PyKDL.Rotation.Quaternion(quat.x, quat.y, quat.z, quat.w)
    return rot.GetRPY()[2]


def normalize_angle(angle):
    res = angle
    while res > pi:
        res -= 2.0 * pi
    while res < -pi:
        res += 2.0 * pi
    return res
```
- navigation_multi_goals111.py
```python
#!/usr/bin/env python
import rospy
import actionlib
from actionlib_msgs.msg import *
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseWithCovarianceStamped
from tf_conversions import transformations
from math import pi
from std_msgs.msg import String
from ar_track_alvar_msgs.msg import AlvarMarkers
from ar_track_alvar_msgs.msg import AlvarMarker

id = 0
flog = 0


class navigation_demo:
    def __init__(self):
        self.set_pose_pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size=5)
        self.arrive_pub = rospy.Publisher('/voiceWords', String, queue_size=10)
        self.ar_sub = rospy.Subscriber('/ar_pose_marker', AlvarMarkers, self.ar_cb)
        self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
        self.move_base.wait_for_server(rospy.Duration(60))

    # Remember the ID of the single visible AR marker and announce it once
    def ar_cb(self, data):
        global id
        global flog
        ar_markers = data
        if (len(ar_markers.markers) == 1):
            ar_marker = ar_markers.markers[0]
            id = ar_marker.id
            if (id == 0):
                if (flog == 0):
                    arrive_str = "target point is 1 2 3"
                    self.arrive_pub.publish(arrive_str)
                    flog = 10

    def set_pose(self, p):
        if self.move_base is None:
            return False
        x, y, th = p
        pose = PoseWithCovarianceStamped()
        pose.header.stamp = rospy.Time.now()
        pose.header.frame_id = 'map'
        pose.pose.pose.position.x = x
        pose.pose.pose.position.y = y
        q = transformations.quaternion_from_euler(0.0, 0.0, th/180.0*pi)
        pose.pose.pose.orientation.x = q[0]
        pose.pose.pose.orientation.y = q[1]
        pose.pose.pose.orientation.z = q[2]
        pose.pose.pose.orientation.w = q[3]
        self.set_pose_pub.publish(pose)
        return True

    def _done_cb(self, status, result):
        rospy.loginfo("navigation done! status:%d result:%s" % (status, result))
        arrive_str = "arrived at target point"
        self.arrive_pub.publish(arrive_str)

    def _active_cb(self):
        rospy.loginfo("[Navi] navigation has been activated")

    def _feedback_cb(self, feedback):
        msg = feedback
        # rospy.loginfo("[Navi] navigation feedback\r\n%s" % feedback)

    def goto(self, p):
        rospy.loginfo("[Navi] goto %s" % p)
        # arrive_str = "going to next point"
        # self.arrive_pub.publish(arrive_str)
        goal = MoveBaseGoal()
        goal.target_pose.header.frame_id = 'map'
        goal.target_pose.header.stamp = rospy.Time.now()
        goal.target_pose.pose.position.x = p[0]
        goal.target_pose.pose.position.y = p[1]
        q = transformations.quaternion_from_euler(0.0, 0.0, p[2]/180.0*pi)
        goal.target_pose.pose.orientation.x = q[0]
        goal.target_pose.pose.orientation.y = q[1]
        goal.target_pose.pose.orientation.z = q[2]
        goal.target_pose.pose.orientation.w = q[3]
        self.move_base.send_goal(goal, self._done_cb, self._active_cb, self._feedback_cb)
        result = self.move_base.wait_for_result(rospy.Duration(60))
        if not result:
            self.move_base.cancel_goal()
            rospy.loginfo("Timed out achieving goal")
        else:
            state = self.move_base.get_state()
            if state == GoalStatus.SUCCEEDED:
                rospy.loginfo("reach goal %s succeeded!" % p)
        return True

    def cancel(self):
        self.move_base.cancel_all_goals()
        return True


if __name__ == "__main__":
    rospy.init_node('navigation_demo', anonymous=True)
    goalListX = rospy.get_param('~goalListX', '2.0, 2.0')
    goalListY = rospy.get_param('~goalListY', '2.0, 4.0')
    goalListYaw = rospy.get_param('~goalListYaw', '0, 90.0')
    goals = [[float(x), float(y), float(yaw)] for (x, y, yaw) in zip(goalListX.split(","), goalListY.split(","), goalListYaw.split(","))]
    print('Press 1 to continue: ')
    input = raw_input()
    print(goals)
    r = rospy.Rate(1)
    r.sleep()
    navi = navigation_demo()
    if (input == '1'):
        navi.goto(goals[0])
        rospy.sleep(2)
        print("target is:" + str(id))
        if (id == 0):
            navi.goto(goals[1])
            rospy.sleep(2)
            navi.goto(goals[2])
            rospy.sleep(2)
            navi.goto(goals[3])
            rospy.sleep(2)
            navi.goto(goals[7])
            rospy.sleep(5)
        if (id == 1):
            navi.goto(goals[4])
            rospy.sleep(2)
            navi.goto(goals[5])
            rospy.sleep(2)
            navi.goto(goals[6])
            rospy.sleep(2)
            navi.goto(goals[7])
            rospy.sleep(5)
        if (id == 2):
            navi.goto(goals[1])
            rospy.sleep(2)
            navi.goto(goals[4])
            rospy.sleep(2)
            navi.goto(goals[6])
            rospy.sleep(2)
            navi.goto(goals[7])
            rospy.sleep(5)
    while not rospy.is_shutdown():
        r.sleep()
```
- navigation_multi_goals.py
```python
#!/usr/bin/env python
#coding: utf-8
import rospy
import actionlib
from actionlib_msgs.msg import *
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseWithCovarianceStamped
from tf_conversions import transformations
from math import pi
from std_msgs.msg import String
from ar_track_alvar_msgs.msg import AlvarMarkers
from ar_track_alvar_msgs.msg import AlvarMarker
from geometry_msgs.msg import Point
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os

music_path = "~/'07.mp3'"
music1_path = "~/'07.mp3'"
music2_path = "~/'07.mp3'"
music3_path = "~/'07.mp3'"

id = 255
flog0 = 255
flog1 = 255
flog2 = 255
count = 0
move_flog = 0


class navigation_demo:
    def __init__(self):
        self.set_pose_pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size=5)
        self.arrive_pub = rospy.Publisher('/voiceWords', String, queue_size=10)
        self.ar_sub = rospy.Subscriber('/object_position', Point, self.ar_cb)
        self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
        self.move_base.wait_for_server(rospy.Duration(60))

    # Map the detected object position (z encodes the object class) to an ID of 0, 1 or 2
    def ar_cb(self, data):
        global id
        global flog0, flog1, flog2, count, move_flog
        id = 255
        point_msg = data
        # rospy.loginfo('z = %d', point_msg.z)
        if (point_msg.z != 255 and move_flog == 0):
            if (point_msg.z >= 1 and point_msg.z <= 8 and flog0 == 255):
                id = 0
                flog0 = 0
            elif (point_msg.z >= 9 and point_msg.z <= 16 and flog1 == 255):
                id = 1
                flog1 = 1
            elif (point_msg.z >= 17 and point_msg.z <= 24 and flog2 == 255):
                id = 2
                flog2 = 2
        elif (point_msg.z != 255 and move_flog == 1):
            if (point_msg.z >= 1 and point_msg.z <= 8):
                id = 0
            elif (point_msg.z >= 9 and point_msg.z <= 16):
                id = 1
            elif (point_msg.z >= 17 and point_msg.z <= 24):
                id = 2
        # print(flog0, flog1, flog2)
        # rospy.loginfo('id = %d', id)

    def set_pose(self, p):
        if self.move_base is None:
            return False
        x, y, th = p
        pose = PoseWithCovarianceStamped()
        pose.header.stamp = rospy.Time.now()
        pose.header.frame_id = 'map'
        pose.pose.pose.position.x = x
        pose.pose.pose.position.y = y
        q = transformations.quaternion_from_euler(0.0, 0.0, th/180.0*pi)
        pose.pose.pose.orientation.x = q[0]
        pose.pose.pose.orientation.y = q[1]
        pose.pose.pose.orientation.z = q[2]
        pose.pose.pose.orientation.w = q[3]
        self.set_pose_pub.publish(pose)
        return True

    def _done_cb(self, status, result):
        rospy.loginfo("navigation done! status:%d result:%s" % (status, result))
        arrive_str = "arrived at target point"
        self.arrive_pub.publish(arrive_str)

    def _active_cb(self):
        rospy.loginfo("[Navi] navigation has been activated")

    def _feedback_cb(self, feedback):
        msg = feedback
        # rospy.loginfo("[Navi] navigation feedback\r\n%s" % feedback)

    def goto(self, p):
        rospy.loginfo("[Navi] goto %s" % p)
        # arrive_str = "going to next point"
        # self.arrive_pub.publish(arrive_str)
        goal = MoveBaseGoal()
        goal.target_pose.header.frame_id = 'map'
        goal.target_pose.header.stamp = rospy.Time.now()
        goal.target_pose.pose.position.x = p[0]
        goal.target_pose.pose.position.y = p[1]
        q = transformations.quaternion_from_euler(0.0, 0.0, p[2]/180.0*pi)
        goal.target_pose.pose.orientation.x = q[0]
        goal.target_pose.pose.orientation.y = q[1]
        goal.target_pose.pose.orientation.z = q[2]
        goal.target_pose.pose.orientation.w = q[3]
        self.move_base.send_goal(goal, self._done_cb, self._active_cb, self._feedback_cb)
        result = self.move_base.wait_for_result(rospy.Duration(60))
        if not result:
            self.move_base.cancel_goal()
            rospy.loginfo("Timed out achieving goal")
        else:
            state = self.move_base.get_state()
            if state == GoalStatus.SUCCEEDED:
                rospy.loginfo("reach goal %s succeeded!" % p)
        return True

    def cancel(self):
        self.move_base.cancel_all_goals()
        return True


if __name__ == "__main__":
    rospy.init_node('navigation_demo', anonymous=True)
    goalListX = rospy.get_param('~goalListX', '2.0, 2.0')
    goalListY = rospy.get_param('~goalListY', '2.0, 4.0')
    goalListYaw = rospy.get_param('~goalListYaw', '0, 90.0')
    goals = [[float(x), float(y), float(yaw)] for (x, y, yaw) in zip(goalListX.split(","), goalListY.split(","), goalListYaw.split(","))]
    print('Press 1 to continue: ')
    input = raw_input()
    print(goals)
    r = rospy.Rate(1)
    r.sleep()
    navi = navigation_demo()
    if (input == '1'):
        # os.system('mplayer %s' % music_path)
        navi.goto(goals[0])
        rospy.sleep(2)
        if (flog0 + flog1 + flog2 <= 255):
            move_flog = 1
        if (flog0 == 0 or flog1 == 1 or flog2 == 2):
            os.system('mplayer %s' % music_path)
            print('case1')
            print(flog2, flog1, flog0)
        navi.goto(goals[1])
        rospy.sleep(2)
        navi.goto(goals[2])
        rospy.sleep(3)
        print(id)
        if (id == flog0 or id == flog1 or id == flog2):
            navi.goto(goals[3])
            rospy.sleep(2)
        else:
            print("no track")
        navi.goto(goals[4])
        rospy.sleep(2)
        if (id == flog0 or id == flog1 or id == flog2):
            navi.goto(goals[5])
            rospy.sleep(2)
            # os.system('mplayer %s' % music_path)
        else:
            print("no track")
    while not rospy.is_shutdown():
        r.sleep()
```
- navigation_goals.py
```python
#!/usr/bin/env python
import rospy
import actionlib
from actionlib_msgs.msg import *
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseWithCovarianceStamped
from tf_conversions import transformations
from math import pi


class navigation_demo:
    def __init__(self):
        self.set_pose_pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size=5)
        self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
        self.move_base.wait_for_server(rospy.Duration(60))

    def set_pose(self, p):
        if self.move_base is None:
            return False
        x, y, th = p
        pose = PoseWithCovarianceStamped()
        pose.header.stamp = rospy.Time.now()
        pose.header.frame_id = 'map'
        pose.pose.pose.position.x = x
        pose.pose.pose.position.y = y
        q = transformations.quaternion_from_euler(0.0, 0.0, th/180.0*pi)
        pose.pose.pose.orientation.x = q[0]
        pose.pose.pose.orientation.y = q[1]
        pose.pose.pose.orientation.z = q[2]
        pose.pose.pose.orientation.w = q[3]
        self.set_pose_pub.publish(pose)
        return True

    def _done_cb(self, status, result):
        rospy.loginfo("navigation done! status:%d result:%s" % (status, result))

    def _active_cb(self):
        rospy.loginfo("[Navi] navigation has been activated")

    def _feedback_cb(self, feedback):
        rospy.loginfo("[Navi] navigation feedback\r\n%s" % feedback)

    def goto(self, p):
        goal = MoveBaseGoal()
        goal.target_pose.header.frame_id = 'map'
        goal.target_pose.header.stamp = rospy.Time.now()
        goal.target_pose.pose.position.x = p[0]
        goal.target_pose.pose.position.y = p[1]
        q = transformations.quaternion_from_euler(0.0, 0.0, p[2]/180.0*pi)
        goal.target_pose.pose.orientation.x = q[0]
        goal.target_pose.pose.orientation.y = q[1]
        goal.target_pose.pose.orientation.z = q[2]
        goal.target_pose.pose.orientation.w = q[3]
        self.move_base.send_goal(goal, self._done_cb, self._active_cb, self._feedback_cb)
        return True

    def cancel(self):
        self.move_base.cancel_all_goals()
        return True


if __name__ == "__main__":
    rospy.init_node('navigation_move', anonymous=True)
    r = rospy.Rate(0.2)
    rospy.loginfo("set pose...")
    navi = navigation_demo()
    # navi.set_pose([0.0, 0.0, 0.0])
    rospy.loginfo("goto goal...")
    navi.goto([0, 0.0, 0])
```

Logic file
- By combining the functionality of the modules above, we can write a logic file that strings everything together to complete the mission; a rough sketch of its general shape is shown after this list.
- Since the competition has not finished yet and we are still in the running, the actual logic file will not be made public for now~
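For a rough idea of the overall shape only (this is a minimal sketch that reuses the modules shown above; the goal coordinates, the import path, and the fixed sleeps are assumptions, not the real competition logic), such a file boils down to a navigation client plus aim-and-shoot steps:

```python
#!/usr/bin/env python
#coding: utf-8
# Hypothetical mission skeleton; NOT the actual competition logic file.
import rospy
import serial
import time
from navigation_multi_goals import navigation_demo  # assumed importable from the same scripts folder

# Same serial device and frames as in shoot_demo.py
ser = serial.Serial(port="/dev/shoot", baudrate=9600, parity="N", bytesize=8, stopbits=1)

def shoot_once():
    ser.write(b'\x55\x01\x12\x00\x00\x00\x01\x69')  # shoot
    time.sleep(0.08)
    ser.write(b'\x55\x01\x11\x00\x00\x00\x01\x68')  # stop

if __name__ == '__main__':
    rospy.init_node('mission_logic', anonymous=True)
    navi = navigation_demo()
    # Placeholder goals [x, y, yaw_deg]; the real values come from the launch file
    goals = [[1.0, 0.5, 0], [2.0, 0.5, 90], [3.0, 0.5, 90], [3.5, 1.5, 0]]
    for goal in goals[:-1]:
        navi.goto(goal)      # drive to the next shooting position
        rospy.sleep(2)       # in the real file: wait until the aiming callback has converged
        shoot_once()
    navi.goto(goals[-1])     # finally drive into the finish area
```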

Running
- To run everything we use startup scripts; the one we currently call is a 0.sh file adapted from ./4-shoot.sh and ./6-mission.sh.
- 0.sh
```bash
##gmapping with abot###
gnome-terminal --window -e 'bash -c "roscore; exec bash"' \
--tab -e 'bash -c "sleep 3; source ~/abot_ws/devel/setup.bash; roslaunch abot_bringup robot_with_imu.launch; exec bash"' \
--tab -e 'bash -c "sleep 3; source ~/abot_ws/devel/setup.bash; roslaunch abot_bringup shoot.launch; exec bash"' \
--tab -e 'bash -c "sleep 4; source ~/abot_ws/devel/setup.bash; roslaunch robot_slam navigation.launch; exec bash"' \
--tab -e 'bash -c "sleep 3; source ~/abot_vision/devel/setup.bash; roslaunch track_tag usb_cam_with_calibration.launch; exec bash"' \
--tab -e 'bash -c "sleep 3; source ~/abot_vision/devel/setup.bash; roslaunch track_tag ar_track_camera.launch; exec bash"' \
--tab -e 'bash -c "sleep 3; source ~/abot_ws/devel/setup.bash; roslaunch find_object_2d find_object_2d.launch; exec bash"' \
--tab -e 'bash -c "sleep 4; source ~/abot_ws/devel/setup.bash; roslaunch robot_slam multi_goal_shoot.launch; exec bash"'
```
- This file lives in the abot_ws directory, so you need to run cd abot_ws first; only then can you start the ./0.sh startup script.

Tuning
Navigation point tuning
- Together with the logic file, tune the shooting thresholds and the navigation points. Here is how the navigation points are adjusted.
- The navigation-point file is abot_ws/src/robot_slam/launch/multi_goal_shoot.launch. Note that this launch file was created from multi_goal.launch, so remember to reference it in the startup script.
```xml
<launch>
  <node name="multi_goal" pkg="robot_slam" type="your logic script" output="screen">
    <param name="goalListX" value="goal point 1 X, goal point 2 X, and so on"/>
    <param name="goalListY" value="goal point 1 Y, goal point 2 Y, and so on"/>
    <param name="goalListYaw" value="goal point 1 yaw, goal point 2 yaw, and so on"/>
  </node>
</launch>
```
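For instance, with three shooting positions plus the finish area filled in (the coordinates and the script name below are made-up placeholders, not our tuned values), the file could look like this:

```xml
<launch>
  <node name="multi_goal" pkg="robot_slam" type="your_logic.py" output="screen">
    <param name="goalListX"   value="1.0, 2.0, 3.0, 3.5"/>
    <param name="goalListY"   value="0.5, 0.5, 0.5, 1.5"/>
    <param name="goalListYaw" value="0, 90, 90, 0"/>
  </node>
</launch>
```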

Shooting point tuning
- Because this year's rules changed and the difficulty increased, there are no longer three fixed targets as before; point A now uses a colored ring target.
- For the ring target, the parts to tune are the error multiplier (which controls the rotation speed) and the flog1 threshold (which controls shooting precision):
```python
if abs(flog1) > 0.5:
    # build a Twist message
    msg = Twist()
    # angular velocity = pixel error times the 0.01 multiplier
    msg.angular.z = -0.01 * flog0
    # publish the Twist message
    self.pub.publish(msg)
# if the absolute error is at most 0.5
elif abs(flog1) <= 0.5:
    # send the shoot command
```
- Target B is a rotating ring target. The parts to tune are marker.id (which target to shoot at), Max_y (maximum target height at which to shoot), Min_y (minimum height), and Yaw_th (shooting precision at point B):
```python
if marker.id == 1 and case == 1:
    ar_x_0 = marker.pose.pose.position.x
    ar_y_0 = marker.pose.pose.position.y
    ar_x_0_abs = abs(ar_x_0)
    ar_y_0_abs = abs(ar_y_0)
    if ar_x_0_abs >= Yaw_th:
        msg = Twist()
        msg.angular.z = -1.5 * ar_x_0
        self.pub.publish(msg)
    elif ar_y_0_abs <= Max_y and ar_y_0_abs >= Min_y and case == 1:
        # send the shoot command
```
- Target C moves back and forth. The parts to tune are marker.id (which target to shoot at) and Yaw_th1 (shooting precision at point C):
```python
if marker.id == 0 and case == 2:
    ar_x_0 = marker.pose.pose.position.x
    ar_y_0 = marker.pose.pose.position.y
    ar_x_0_abs = abs(ar_x_0)
    if ar_x_0_abs >= Yaw_th1:
        msg = Twist()
        msg.angular.z = -1.5 * ar_x_0
        self.pub.publish(msg)
    elif ar_x_0_abs < Yaw_th1 and case == 2:
        # send the shoot command
```

Robot footprint and inflation radius tuning
- The robot footprint parameters are in abot_ws/src/robot_slam/params/carto/costmap_common_params.yaml:
```yaml
obstacle_range: 3.0
raytrace_range: 3.5
footprint: [[X1, Y1], [X2, Y2], [X3, Y3], [X4, Y4]]  # robot footprint vertices (currently a square)
#robot_radius: 0.17
inflation_radius: <inflation radius>
cost_scaling_factor: 3.0
map_type: costmap
observation_sources: scan
scan: {sensor_frame: laser_link, data_type: LaserScan, topic: scan_filtered, marking: true, clearing: true}
```
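As a point of reference only (example numbers, not our tuned parameters), a roughly 30 cm square robot with a modest safety margin could be described as:

```yaml
footprint: [[0.15, 0.15], [0.15, -0.15], [-0.15, -0.15], [-0.15, 0.15]]  # 0.30 m square centred on the base
inflation_radius: 0.20   # larger values keep the planner further away from obstacles
```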

TF tuning
- The TF transform file is at abot_ws/src/abot_base/abot_bringup/launch/bringup_with_imu.launch.
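What usually gets adjusted in a bringup file like this is the static transform between the sensor frames and base_link. A generic example of such a node (illustrative offsets; this is not the content of the actual bringup_with_imu.launch) looks like:

```xml
<!-- args: x y z yaw pitch roll parent_frame child_frame period_ms -->
<node pkg="tf" type="static_transform_publisher" name="base_to_laser"
      args="0.10 0.0 0.15 0.0 0.0 0.0 base_link laser_link 50" />
```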

Endpoint tuning
In past competitions the finish zone has been the hardest part: many teams could not get into it because of obstacle avoidance. This part of the tuning will therefore only be published after the competition ends.

Mapping
- Run ./1-gmapping.sh
(file lost~)
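Since the original 1-gmapping.sh was lost, here is a rough reconstruction of what such a mapping startup script usually looks like on this robot, modelled on the 0.sh script above (the gmapping launch file name is a guess):

```bash
##gmapping with abot###
gnome-terminal --window -e 'bash -c "roscore; exec bash"' \
--tab -e 'bash -c "sleep 3; source ~/abot_ws/devel/setup.bash; roslaunch abot_bringup robot_with_imu.launch; exec bash"' \
--tab -e 'bash -c "sleep 4; source ~/abot_ws/devel/setup.bash; roslaunch robot_slam gmapping.launch; exec bash"'
```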
- After driving the robot around the whole map, run:
```bash
roslaunch robot_slam save_map.launch
```
Competition progress
- School-level competition: reviewed √
- School-level award: first prize
- Provincial competition: registered
- Provincial competition: competed √
- Provincial award: third prize
  Reason: the target was broken.