Compare commits

..

153 Commits

Author SHA1 Message Date
90a1c03a5c :sparkles: 2022-11-07 14:08:25 +01:00
30213107de :sparkles: 2022-11-07 14:07:53 +01:00
78e76da161 :sparkles: 2022-11-07 12:09:03 +01:00
1c79989840 :sparkles: 2022-11-07 12:07:02 +01:00
90b5ff824d :sparkles: 2022-11-07 12:06:27 +01:00
144551df39 2022-11-02 15:26:39 +01:00
e18767fcb6 2022-11-02 15:23:54 +01:00
919b7b2c4d 2022-11-02 15:22:35 +01:00
b30833dcae 2022-11-02 15:15:26 +01:00
12bcfe412f 2022-11-02 15:12:29 +01:00
374c321c4c 2022-11-02 15:08:31 +01:00
e974a0bb27 2022-11-02 15:05:39 +01:00
f7d459482a 2022-11-02 15:05:02 +01:00
073897c485 2022-11-02 15:01:03 +01:00
76a459fc9f 2022-11-02 14:58:55 +01:00
b0bd525726 2022-11-02 14:54:07 +01:00
c70da77fc9 2022-11-02 14:49:30 +01:00
60dbadabe1 2022-11-02 14:49:02 +01:00
bcf1ae2a5a 2022-11-02 14:46:59 +01:00
82e4c2eb43 2022-11-02 14:45:46 +01:00
02c2c04ca4 2022-11-02 14:43:44 +01:00
5fdcf1c0dc 2022-11-02 14:27:43 +01:00
71b7c67fff 2022-11-02 14:20:35 +01:00
fbd30c388c 2022-11-02 14:19:12 +01:00
ffe364dd97 2022-11-02 14:18:07 +01:00
21236301e0 2022-11-02 14:14:22 +01:00
9d8e95d711 2022-11-02 14:09:27 +01:00
22ef920735 2022-11-02 11:17:13 +01:00
0558fcd9e4 2022-11-02 11:16:22 +01:00
9890800079 2022-11-02 10:27:03 +01:00
d68039f09f 2022-11-02 10:26:30 +01:00
323185d08d 2022-11-02 10:19:05 +01:00
baed8628be 2022-11-02 10:12:59 +01:00
850ccb8179 2022-11-02 10:09:33 +01:00
879adf7b91 2022-10-31 15:48:50 +01:00
123c8fe350 2022-10-31 15:23:19 +01:00
f93a046270 2022-10-31 15:20:21 +01:00
9cc3b41f87 2022-10-31 15:12:38 +01:00
81862f8ee3 2022-10-31 15:07:58 +01:00
3219028980 2022-10-31 15:03:15 +01:00
cf21934931 2022-10-31 14:56:10 +01:00
a7616ad41c 2022-10-31 14:55:39 +01:00
963509fd8a 2022-10-31 14:53:58 +01:00
2c65a75f28 2022-10-31 14:50:11 +01:00
0621d02fff 2022-10-31 14:49:29 +01:00
51b29110bc 2022-10-31 14:48:20 +01:00
8984fcf6af 2022-10-31 14:45:43 +01:00
6c27ad4805 Quick fix for imports 2022-10-31 14:40:28 +01:00
437ccde0f1 2022-10-26 14:56:57 +02:00
e329d2b87f 2022-10-26 14:54:10 +02:00
679d6b78c5 2022-10-26 14:52:20 +02:00
302095a929 2022-10-26 14:50:59 +02:00
8be9656f5d 2022-10-26 14:45:06 +02:00
15fab9c09c 2022-10-26 14:43:48 +02:00
04c8dcc96e 2022-10-26 14:37:03 +02:00
5ddcd8499d 2022-10-26 14:34:57 +02:00
9145085739 2022-10-26 14:32:30 +02:00
ebf356445b 2022-10-26 14:30:18 +02:00
ef5f297a47 2022-10-26 14:30:13 +02:00
0e46b0dbe2 2022-10-26 14:17:19 +02:00
ca4929de5c 2022-10-26 14:16:03 +02:00
e0cea5cb78 2022-10-26 14:15:14 +02:00
a626ad7c14 2022-10-26 14:13:21 +02:00
be1183c611 2022-10-26 14:08:23 +02:00
94bbb57d65 2022-10-26 14:04:08 +02:00
05f654ba94 2022-10-26 14:02:57 +02:00
2369aad004 2022-10-26 13:54:14 +02:00
7edb905bd2 2022-10-26 13:50:34 +02:00
cb33cc0027 2022-10-26 13:50:07 +02:00
417dd962ae 2022-10-26 13:43:06 +02:00
b636440409 2022-10-26 13:42:20 +02:00
616d902ee2 😼 2022-10-26 13:37:40 +02:00
c78c596db2 👑 2022-10-26 13:09:52 +02:00
ad57381bc0 🍆 2022-10-26 10:47:04 +02:00
a7ffe3f581 💀 skeleton for rally 2022-10-26 10:28:25 +02:00
23b99b56d5 2022-10-26 09:47:37 +02:00
5e2766b01e 2022-10-26 09:45:19 +02:00
dc23e96fba 2022-10-24 15:11:44 +02:00
ef9cabcaa5 2022-10-24 14:56:26 +02:00
d3db855dad 2022-10-24 14:52:29 +02:00
d8647d4466 2022-10-24 14:49:07 +02:00
4ac0ebbd22 2022-10-24 14:44:34 +02:00
1f763a512f 2022-10-24 14:42:48 +02:00
edec956494 2022-10-24 14:41:36 +02:00
4e41d872ce 2022-10-24 14:34:46 +02:00
f822019bd1 2022-10-24 14:32:08 +02:00
3b1ce060df 2022-10-24 14:30:44 +02:00
492f7b90cd 2022-10-24 14:30:12 +02:00
4c89c36a78 2022-10-24 14:27:38 +02:00
afd5be5eb3 2022-10-24 14:22:15 +02:00
93df18a433 2022-10-24 14:21:37 +02:00
ab5974d46a 2022-10-24 14:21:08 +02:00
85289bc5c9 2022-10-24 14:20:43 +02:00
90d13f8a03 2022-10-24 14:20:11 +02:00
03245ecd2a 2022-10-24 14:19:40 +02:00
1a4587d60c 2022-10-24 14:19:16 +02:00
44434488a6 2022-10-24 14:18:23 +02:00
1edc312c8a 2022-10-24 14:17:43 +02:00
5714651a8d 2022-10-24 14:09:48 +02:00
2372c85148 2022-10-24 14:03:29 +02:00
2c10cc9346 2022-10-12 14:02:22 +02:00
64d548ddfa :water: 2022-10-12 13:23:20 +02:00
a97b998754 :flame: 2022-10-12 11:10:08 +02:00
783f3291c8 2022-10-12 10:44:18 +02:00
baf83d8fee 2022-10-10 15:09:55 +02:00
27f039059f 2022-10-10 15:04:48 +02:00
1c5a0edc0b 2022-10-10 15:03:16 +02:00
8884013910 2022-10-10 15:01:54 +02:00
af3064ad4c 2022-10-10 15:00:30 +02:00
85319f7631 2022-10-10 14:57:15 +02:00
5966c1188b 2022-10-10 14:54:32 +02:00
1f9cb9069e 2022-10-10 14:51:06 +02:00
f8de362c43 2022-10-10 14:47:19 +02:00
f60b56587a 2022-10-10 14:44:41 +02:00
3e32a38ad2 2022-10-10 14:41:49 +02:00
d4a4d1c443 2022-10-10 14:40:54 +02:00
ac5e76d977 2022-10-10 14:39:38 +02:00
86b621fa9d 2022-10-10 14:38:55 +02:00
a0eeeba787 2022-10-10 14:37:09 +02:00
b3aaf40487 2022-10-10 14:32:50 +02:00
0860ccd58b 2022-10-10 14:31:11 +02:00
ac6d94ee39 2022-10-10 14:28:11 +02:00
dc8f23bd5e 2022-10-10 14:27:09 +02:00
5e7509d2b6 2022-10-10 14:25:03 +02:00
8a2156a5d9 2022-10-10 14:23:26 +02:00
642591ecc4 2022-10-10 14:16:47 +02:00
b7ccb84c19 2022-10-10 14:14:01 +02:00
0bad669ba5 2022-10-10 14:10:22 +02:00
f2e3a7f56a 2022-10-10 14:07:57 +02:00
b0e4bcf85c 2022-10-10 14:05:31 +02:00
e3916ab6a0 2022-10-10 14:01:59 +02:00
3a3903176a 2022-10-10 14:00:45 +02:00
f77b2d32bd 2022-10-10 13:48:46 +02:00
f547e8e0c3 2022-10-10 13:47:38 +02:00
e1e229efc7 2022-10-10 13:41:02 +02:00
8c2a835e01 2022-10-10 13:39:05 +02:00
640db167fe 2022-10-05 11:40:03 +02:00
f598801950 2022-10-03 14:36:22 +02:00
2009e1c972 2022-09-28 14:24:10 +02:00
a025ed0098 2022-09-28 14:23:19 +02:00
341de6027a 2022-09-28 14:22:14 +02:00
1b0c989800 2022-09-28 14:19:58 +02:00
81815b17a6 2022-09-28 14:18:49 +02:00
6b3c84fff2 2022-09-28 14:12:51 +02:00
2490d93a3c 2022-09-28 14:11:33 +02:00
7fb14f9345 2022-09-28 14:06:13 +02:00
95c011e4e7 2022-09-28 14:04:25 +02:00
f546e3f8f5 2022-09-28 14:03:26 +02:00
13bde48720 2022-09-28 14:02:18 +02:00
0cb11b5264 2022-09-28 14:01:11 +02:00
eeafcc7f8b 2022-09-28 13:59:45 +02:00
81d9ae690a 2022-09-28 13:58:59 +02:00
8cf1d8dcbd 2022-09-28 13:58:15 +02:00
12 changed files with 654 additions and 90 deletions

BIN
.DS_Store vendored Normal file

Binary file not shown.

BIN
Examples/.DS_Store vendored Normal file

Binary file not shown.

View File

@ -13,7 +13,7 @@ DRIVE_T = 22 # 1 centimeter
RIGHT_WHEEL_OFFSET = 4
CLOCKWISE_OFFSET = 0.96
CLOCKWISE_OFFSET = 0.92
FOCAL_LENGTH = 1691
@ -51,33 +51,32 @@ def find_arucos(arlo):
position = cv2.aruco.estimatePoseSingleMarkers(
np.array([aruco]), 14.5, CAMERA_MATRIX, DIST_COEF
)[1][0][0]
position = np.array([position[0], position[1]])
position = np.array([position[2], position[0]])
position[0] += 22.5
aruco_dict[id_] = position
if len(aruco_dict) >= 2:
break
arlo.go_diff(POWER, POWER, 1, 0)
sleep((20 * TURN_T)/1000)
sleep((20 * TURN_T * CLOCKWISE_OFFSET)/1000)
arlo.stop()
for key, value in aruco_dict.items():
print(rot, value)
aruco_dict[key] = np.dot(rot, value)
return np.array(aruco_dict.values())[:2]
return np.array(list(aruco_dict.values())[:2])
def main():
arlo = Robot()
aruco_positions = find_arucos(arlo)
print(aruco_positions)
position = [
np.average(aruco_positions[:,0]),
np.average(aruco_positions[:,2])
np.average(aruco_positions[:,1])
]
print(position)
angle = np.rad2deg(np.arctan(position[0]/position[1]))
angle = np.rad2deg(np.arctan(position[1]/position[0]))
drive_distance = np.sqrt(position[0]**2 + position[1]**2)
if angle < 0:
arlo.go_diff(POWER, POWER, 0, 1)

View File

@ -52,6 +52,7 @@ def main():
angle = np.rad2deg(np.arctan(position[0]/position[2]))
drive_distance = np.sqrt(position[0]**2 + position[2]**2)
print(drive_distance)
if angle < 0:
arlo.go_diff(POWER, POWER, 0, 1)
sleep((abs(angle) * TURN_T)/1000)
@ -61,9 +62,9 @@ def main():
sleep((abs(angle) * TURN_T * CLOCKWISE_OFFSET)/1000)
arlo.stop()
arlo.go_diff(POWER, POWER + RIGHT_WHEEL_OFFSET, 1, 1)
sleep((drive_distance * DRIVE_T)/1000)
arlo.stop()
# arlo.go_diff(POWER, POWER + RIGHT_WHEEL_OFFSET, 1, 1)
# sleep((drive_distance * DRIVE_T)/1000)
# arlo.stop()
if __name__ == "__main__":

20
occupancy_grids.py Normal file
View File

@ -0,0 +1,20 @@
from matplotlib.backend_bases import NonGuiException
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import uniform
def Binary_Baye_Filter (I, zt, x):
    """Binary Bayes filter -- not yet implemented (stub).

    NOTE(review): "Baye" looks like a typo for "Bayes", and the parameter
    names (I, zt, x) are undocumented; presumably log-odds map, measurement,
    and state -- confirm before implementing. Kept as-is to avoid breaking
    any callers.
    """
    pass
def invserse_sensor_model(m, x, z):
    """Inverse sensor model for occupancy-grid updates -- not yet implemented (stub).

    NOTE(review): "invserse" is a typo for "inverse"; rename together with
    all call sites when this gets implemented.
    """
    pass
def update(particles, weights, z, R, landmarks):
    """Particle-filter measurement update (SIR weight step).

    For each landmark, multiply every particle's weight by the likelihood
    of the measured range ``z[i]`` given that particle's distance to the
    landmark, then renormalize.  ``weights`` is modified in place; nothing
    is returned.

    particles -- (N, >=2) array; columns 0:2 are the particle x/y positions
    weights   -- length-N weight array (mutated in place)
    z         -- sequence of measured distances, one per landmark
    R         -- measurement standard deviation
    landmarks -- iterable of landmark (x, y) positions
    """
    # Local import: the module top only imports numpy/matplotlib.
    import scipy.stats
    for i, landmark in enumerate(landmarks):
        distance = np.linalg.norm(particles[:, 0:2] - landmark, axis=1)
        # BUG FIX: was `np.scipy.stats.norm` -- numpy has no `scipy`
        # attribute, so this line always raised AttributeError.
        weights *= scipy.stats.norm(distance, R).pdf(z[i])
    weights += 1.e-300  # avoid round-off to zero
    weights /= sum(weights)  # normalize

154
rally.py Normal file
View File

@ -0,0 +1,154 @@
import time
import numpy as np
import cv2
import robot
LANDMARKS = [1,2,3,4] # SET ARUCO NUMBERS -- ids of the markers to visit, in order
POWER = 70  # motor power passed to go_diff
TURN_T = 8.3 # 1 degree -- ms of turning per degree (sleep = angle*TURN_T/1000 s)
DRIVE_T = 22 # 1 centimeter -- ms of driving per cm
RIGHT_WHEEL_OFFSET = 5  # extra right-wheel power to keep the robot straight
CLOCKWISE_OFFSET = 0.8  # calibration scale for clockwise turn times -- TODO confirm
FOCAL_LENGTH = 1691  # camera focal length in pixels
# Pinhole intrinsics; principal point at (512, 360), presumably the image center.
CAMERA_MATRIX = np.array(
    [[FOCAL_LENGTH,0,512],[0,FOCAL_LENGTH,360],[0,0,1]],
    dtype=np.float32
)
DIST_COEF = np.array([0,0,0,0,0], dtype=np.float32)  # no lens distortion modelled
def find_aruco(image):
    """Detect 6x6 ArUco markers in `image`.

    Returns a dict mapping marker id -> (4, 2) array of corner points,
    or an empty dict when nothing is detected.
    """
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)
    aruco_params = cv2.aruco.DetectorParameters_create()
    corners, ids, _ = cv2.aruco.detectMarkers(
        image,
        aruco_dict,
        parameters=aruco_params
    )
    # BUG FIX: detectMarkers signals "nothing found" with ids=None and an
    # *empty* corners tuple, so the old `corners is None` test never fired
    # (it only worked by accident because the comprehension over an empty
    # tuple yields {}).  Check ids, and return {} instead of [] so the
    # return type is consistent for callers doing `landmark in arucos`.
    if ids is None:
        return {}
    return {ids[i][0]: box[0] for i, box in enumerate(corners)}
def drunk_drive(drive_time, arlo, careful_range = 600):
    """Wander forward for `drive_time` seconds, steering away from obstacles.

    Drives straight while the front sonar reads beyond `careful_range` and
    both side sonars read beyond 250; otherwise keeps pivoting in one
    direction (sticky `turning` state) until the path clears.
    """
    deadline = time.time() + drive_time
    turning = None
    while time.time() < deadline:
        front = arlo.read_front_ping_sensor()
        right = arlo.read_right_ping_sensor()
        left = arlo.read_left_ping_sensor()
        if front > careful_range and min(right, left) > 250:
            # Path is clear: go straight and forget any turn in progress.
            turning = None
            arlo.go_diff(POWER, POWER + RIGHT_WHEEL_OFFSET, 1, 1)
        elif turning == "R" or (turning is None and front > careful_range and right > 250):
            # Keep (or start) pivoting right.
            arlo.go_diff(POWER, POWER, 1, 0)
            turning = "R"
        else:
            # Default evasive move: pivot left.
            arlo.go_diff(POWER, POWER + RIGHT_WHEEL_OFFSET, 0, 1)
            turning = "L"
        time.sleep(0.01)
def careful_forward(drive_time, arlo, careful_range = 600):
    """Drive forward for up to `drive_time` seconds, dodging obstacles.

    Returns True when the drive was interrupted (an obstacle forced a turn)
    or when the requested time exceeded the 0.8 s cap -- in either case the
    caller should re-observe before driving on.  Returns False after a
    clean, uninterrupted leg.
    """
    start = time.time()
    # Cap each leg at 0.8 s; a capped leg is reported like an obstacle hit
    # so the caller re-checks its position before continuing.
    hit_something = drive_time > 0.8
    if hit_something:
        drive_time = 0.8
    deadline = start + drive_time
    turning = None
    while time.time() < deadline:
        front = arlo.read_front_ping_sensor()
        right = arlo.read_right_ping_sensor()
        left = arlo.read_left_ping_sensor()
        if front > careful_range and min(right, left) > 250:
            # Clear ahead: drive straight, clear the sticky turn state.
            turning = None
            arlo.go_diff(POWER, POWER + RIGHT_WHEEL_OFFSET, 1, 1)
        else:
            hit_something = True
            if turning == "R" or (turning is None and front > careful_range and right > 250):
                arlo.go_diff(POWER, POWER, 1, 0)
                turning = "R"
            else:
                arlo.go_diff(POWER, POWER + RIGHT_WHEEL_OFFSET, 0, 1)
                turning = "L"
        time.sleep(0.01)
    return hit_something
def main():
    """Visit every landmark in LANDMARKS in order, then revisit the first.

    For each landmark: spin in 15-degree steps until the marker is seen
    (wandering with drunk_drive between full revolutions), estimate its
    pose from the camera, turn toward it, and approach in short careful
    legs until a final 25 cm-offset drive succeeds.
    """
    # Append the first landmark again so the run ends back where it started.
    landmark_order = LANDMARKS + [
        LANDMARKS[0]
    ]
    noah = robot.Robot()
    while landmark_order != []:
        landmark = landmark_order.pop(0)
        print(f"looking for landmark {landmark}")
        while True:
            # Search phase: up to 24 x 20-unit turns (~a full revolution),
            # then wander for 3 s and try again until the marker is seen.
            while True:
                for _ in range(24):
                    arucos = find_aruco(noah.take_photo())
                    if landmark in arucos:
                        break
                    noah.go_diff(POWER, POWER, 0, 1)
                    time.sleep((20 * TURN_T)/1000)
                    noah.stop()
                if landmark in arucos:
                    break
                drunk_drive(3, noah)
            # Pose estimate: [1][0][0] selects the translation vector
            # (x right, y down, z forward, cm given the 14.5 cm marker).
            position = cv2.aruco.estimatePoseSingleMarkers(
                np.array([arucos[landmark]]), 14.5, CAMERA_MATRIX, DIST_COEF
            )[1][0][0]
            # Bearing to the marker; the -5 degree bias is presumably a
            # camera-mount calibration fudge -- TODO confirm.
            angle = np.rad2deg(np.arctan(position[0]/position[2])) - 5
            # Stop 100 cm short on this leg.
            drive_distance = np.sqrt(position[0]**2 + position[2]**2) - 100
            if angle < 0:
                noah.go_diff(POWER, POWER, 0, 1)
                time.sleep((abs(angle) * TURN_T)/1000)
                noah.stop()
            else:
                noah.go_diff(POWER, POWER, 1, 0)
                time.sleep((abs(angle) * TURN_T * CLOCKWISE_OFFSET)/1000)
                noah.stop()
            # careful_forward returns False only after a clean short leg;
            # then re-sight the marker and do the final 25 cm-offset drive.
            if not careful_forward((drive_distance * DRIVE_T)/1000, noah, 400):
                arucos = find_aruco(noah.take_photo())
                if landmark in arucos:
                    position = cv2.aruco.estimatePoseSingleMarkers(
                        np.array([arucos[landmark]]), 14.5, CAMERA_MATRIX, DIST_COEF
                    )[1][0][0]
                    drive_distance = np.sqrt(position[0]**2 + position[2]**2) - 25
                    noah.go_diff(POWER, POWER, 1, 1)
                    time.sleep((drive_distance * DRIVE_T)/1000)
                    noah.stop()
                    break
if __name__ == "__main__":
    # Time the whole rally run for post-hoc analysis.
    start = time.time()
    main()
    print(f"Drove for {time.time() - start} seconds")

BIN
rally.zip Normal file

Binary file not shown.

314
rally2.py Normal file
View File

@ -0,0 +1,314 @@
import time
from turtle import right
import numpy as np
import cv2
from selflocalization import particle, camera
import robot
# ArUco ids of the course landmarks, in the order they must be visited.
LANDMARKS = [6,8,9,7]
# World coordinates (cm) of each landmark on the course.
LANDMARK_POSITIONS = {
    6: [0, 0],
    8: [300, 0],
    9: [0, 400],
    7: [300, 400]
}
POWER = 70  # motor power passed to go_diff
TURN_T = 9.6  # 1 degree -- ms of turning per degree (sleep = deg*TURN_T/1000 s)
DRIVE_T = 22  # 1 centimeter -- ms of driving per cm
RIGHT_WHEEL_OFFSET = 4  # extra right-wheel power to keep the robot straight
CLOCKWISE_OFFSET = 0.96  # calibration scale for clockwise turn times -- TODO confirm
FOCAL_LENGTH = 1691  # camera focal length in pixels
# Pinhole intrinsics; principal point at (512, 360), presumably the image center.
CAMERA_MATRIX = np.array(
    [[FOCAL_LENGTH, 0, 512], [0, FOCAL_LENGTH, 360], [0, 0, 1]],
    dtype=np.float32
)
DIST_COEF = np.array([0, 0, 0, 0, 0], dtype=np.float32)  # no lens distortion modelled
SIGMA = 3  # std dev of distance measurements (cm) used in particle weighting
SIGMA_THETA = 0.3  # std dev of angle measurements (rad) used in particle weighting
NUM_PARTICLES = 1000  # particle-filter population size
def look_around(noah, particles, cam, est_pose):
    """Spin a full revolution in 24 x 15-degree steps, updating the filter.

    At each step: grab a frame, weight and resample the particles on any
    detected landmark observations (distance gets +22.5 cm, presumably a
    marker-center offset -- confirm), then turn 15 degrees and rotate every
    particle to match.  Returns the updated (particles, est_pose).
    """
    for _ in range(24):
        time.sleep(0.2)
        # Fetch next frame -- read 10 frames and keep the last, presumably
        # to flush stale buffered images; confirm against camera driver.
        for _ in range(10):
            colour = cam.get_next_frame()
        landmark_values = []
        # Detect objects
        objectIDs, dists, angles = cam.detect_aruco_objects(colour)
        if not isinstance(objectIDs, type(None)):
            # List detected objects
            for i in range(len(objectIDs)):
                print(
                    "Object ID = ", objectIDs[i], ", Distance = ", dists[i], ", angle = ", angles[i])
                if objectIDs[i] in LANDMARKS:
                    landmark_values.append((objectIDs[i], dists[i] + 22.5, angles[i]))
            # Compute particle weights
            for p in particles:
                calc_weight(p, landmark_values)
            # Resampling
            particles = particle.resample(particles)
            # Draw detected objects
            cam.draw_aruco_objects(colour)
        else:
            # No observation - reset weights to uniform distribution
            for p in particles:
                p.setWeight(1.0/NUM_PARTICLES)
        particle.add_uncertainty(particles, SIGMA, SIGMA_THETA)
        # The estimate of the robots current pose
        est_pose = particle.estimate_pose(particles)
        calc_weight(est_pose, landmark_values)
        # Turn 15 degrees counter-clockwise and mirror the motion in the filter.
        noah.go_diff(POWER, POWER, 0, 1)
        time.sleep((15 * TURN_T)/1000)
        noah.stop()
        for p in particles:
            p.setTheta(p.theta - np.deg2rad(15))
    # The estimate of the robots current pose
    est_pose = particle.estimate_pose(particles)
    return particles, est_pose
def turn_towards_landmark(noah, particles, est_pose, landmark):
    """Rotate the robot (and the particle cloud) to face `landmark`.

    Picks the shorter turn direction from the estimated pose and returns
    the updated (particles, est_pose).
    """
    current_position = np.array([est_pose.x, est_pose.y])
    current_theta = est_pose.theta
    landmark_position = np.array(LANDMARK_POSITIONS[landmark])
    relative_pos = landmark_position - current_position
    # NOTE(review): np.arctan(dy/dx) loses the quadrant and divides by zero
    # when dx == 0 -- np.arctan2(dy, dx) would be safer; confirm before
    # changing since the turn calibration below may compensate.
    angle = np.rad2deg(np.arctan(relative_pos[1]/relative_pos[0]))
    # Turn needed, normalized to [0, 360); the -180 offset matches the
    # pose convention used elsewhere in this project -- TODO confirm.
    turn_angle = np.mod(angle - (np.rad2deg(current_theta) - 180), 360)
    for p in particles:
        p.setTheta(p.theta - np.deg2rad(turn_angle))
    if turn_angle < 180:
        # Counter-clockwise is the shorter way round.
        noah.go_diff(POWER, POWER, 0, 1)
        time.sleep((abs(turn_angle) * TURN_T)/1000)
        noah.stop()
    else:
        # Clockwise, scaled by the clockwise calibration factor.
        noah.go_diff(POWER, POWER, 1, 0)
        time.sleep((abs(360 - turn_angle) * TURN_T * CLOCKWISE_OFFSET)/1000)
        noah.stop()
    particle.add_uncertainty(particles, SIGMA, SIGMA_THETA)
    # The estimate of the robots current pose
    est_pose = particle.estimate_pose(particles)
    return particles, est_pose
def time_to_landmark(est_pose, landmark):
    """Seconds of driving needed to stop roughly 1 m short of `landmark`."""
    here = np.array([est_pose.x, est_pose.y])
    there = np.array(LANDMARK_POSITIONS[landmark])
    offset = there - here
    # Stop 100 cm short of the landmark itself.
    drive_distance = np.sqrt(offset[0]**2 + offset[1]**2) - 100
    return (DRIVE_T * drive_distance)/1000
def drive_until_stopped(noah, particles):
    """Drive straight until a sonar trips, 5 s elapse, then update the filter.

    Stops on: front sonar < 1000, either side sonar < 400, or a 5 s timeout.
    Dead-reckons the distance covered from elapsed time and shifts every
    particle along its own heading.  Returns (time_driven, est_pose, particles).
    """
    noah.go_diff(POWER, POWER+RIGHT_WHEEL_OFFSET, 1, 1)
    start = time.time()
    while True:
        forward_dist = noah.read_front_ping_sensor()
        if forward_dist < 1000:
            noah.stop()
            break
        left_dist = noah.read_left_ping_sensor()
        if left_dist < 400:
            noah.stop()
            break
        right_dist = noah.read_right_ping_sensor()
        if right_dist < 400:
            noah.stop()
            break
        if time.time() - start > 5:
            noah.stop()
            break
        time.sleep(0.01)
    end = time.time()
    time_driven = end - start
    # Invert the ms-per-cm calibration to get centimeters covered.
    distance_driven = (time_driven*1000)/DRIVE_T
    for p in particles:
        x = np.cos(p.theta) * distance_driven
        y = np.sin(p.theta) * distance_driven
        p.x = p.x + x
        p.y = p.y + y
    particle.add_uncertainty(particles, SIGMA, SIGMA_THETA)
    est_pose = particle.estimate_pose(particles)
    return time_driven, est_pose, particles
def drunk_drive(noah):
    """Wander for two seconds, steering away from nearby obstacles.

    Drives straight while the front sonar reads beyond 600 and both side
    sonars beyond 400; otherwise keeps pivoting in one direction (sticky
    `turning` state) until the path clears.
    """
    deadline = time.time() + 2
    turning = None
    while time.time() < deadline:
        front = noah.read_front_ping_sensor()
        right = noah.read_right_ping_sensor()
        left = noah.read_left_ping_sensor()
        if front > 600 and min(right, left) > 400:
            # Path is clear: go straight and forget any turn in progress.
            turning = None
            noah.go_diff(POWER, POWER + RIGHT_WHEEL_OFFSET, 1, 1)
        elif turning == "R" or (turning is None and front > 600 and right > 400):
            noah.go_diff(POWER, POWER, 1, 0)
            turning = "R"
        else:
            noah.go_diff(POWER, POWER + RIGHT_WHEEL_OFFSET, 0, 1)
            turning = "L"
        time.sleep(0.01)
def inch_closer(noah, particles):
    """Creep forward until the front sonar reads < 300, then update the filter.

    Dead-reckons the distance covered from elapsed time and shifts every
    particle along its own heading.  Returns (est_pose, particles).

    NOTE(review): unlike drive_until_stopped there is no timeout and no
    sleep in the polling loop -- the robot busy-waits and can drive forever
    if nothing ever comes within 300; confirm this is intended.
    """
    start = time.time()
    noah.go_diff(POWER, POWER+RIGHT_WHEEL_OFFSET, 1, 1)
    while True:
        forward_dist = noah.read_front_ping_sensor()
        if forward_dist < 300:
            noah.stop()
            break
    end = time.time()
    time_driven = end - start
    # Invert the ms-per-cm calibration to get centimeters covered.
    distance_driven = (time_driven*1000)/DRIVE_T
    for p in particles:
        x = np.cos(p.theta) * distance_driven
        y = np.sin(p.theta) * distance_driven
        p.x = p.x + x
        p.y = p.y + y
    particle.add_uncertainty(particles, SIGMA, SIGMA_THETA)
    est_pose = particle.estimate_pose(particles)
    return est_pose, particles
def initialize_particles(num_particles):
    """Spawn `num_particles` particles uniformly over the search area.

    x in [-100, 500), y in [-250, 350), theta in [0, 2*pi); every particle
    starts with the uniform weight 1/num_particles.
    """
    uniform_weight = 1.0/num_particles
    return [
        particle.Particle(
            600.0*np.random.ranf() - 100.0,
            600.0*np.random.ranf() - 250.0,
            np.mod(2.0*np.pi*np.random.ranf(), 2.0*np.pi),
            uniform_weight,
        )
        for _ in range(num_particles)
    ]
def dist(particle, landmark):
    """Euclidean distance from `particle` (has .x/.y) to `landmark` ((x, y))."""
    dx = landmark[0] - particle.x
    dy = landmark[1] - particle.y
    return np.sqrt(dx**2 + dy**2)
def calc_angle(particle, landmark, dist):
    """Signed bearing (radians) from the particle's heading to `landmark`.

    Positive means the landmark lies to the particle's left.  `dist` is the
    precomputed particle-to-landmark distance (used to normalize).
    """
    heading = np.array([np.cos(particle.theta), np.sin(particle.theta)]).T
    # Unit vector pointing from the particle toward the landmark.
    to_landmark = np.array([landmark[0]-particle.x, landmark[1]-particle.y]).T / dist
    # Heading rotated +90 degrees; its dot product fixes the turn sign.
    left_normal = np.array([-np.sin(particle.theta), np.cos(particle.theta)]).T
    magnitude = np.arccos(np.dot(to_landmark, heading))
    return np.sign(np.dot(to_landmark, left_normal)) * magnitude
def normal(x, mean, std):
    """Gaussian probability density of `x` under N(mean, std**2)."""
    z = (x - mean) / std
    return np.exp(-0.5 * z * z) / (np.sqrt(2 * np.pi) * std)
def calc_weight(particle, landmark_values):
    """Set `particle`'s weight from observed (id, distance, angle) tuples.

    Each observation contributes dist_likelihood * angle_likelihood under
    the SIGMA / SIGMA_THETA measurement model; the particle weight becomes
    the product over all observations.  Leaves the weight untouched when
    there are no observations.
    """
    if landmark_values == []:
        return
    weights = []
    for values in landmark_values:
        dist_to_landmark = dist(particle, LANDMARK_POSITIONS[values[0]])
        dist_weight = normal(values[1], dist_to_landmark, SIGMA)
        angle_to_landmark = calc_angle(
            particle, LANDMARK_POSITIONS[values[0]], dist_to_landmark)
        angle_weight = normal(values[2], angle_to_landmark, SIGMA_THETA)
        weights.append(dist_weight * angle_weight)
    # BUG FIX: np.product was deprecated and removed in NumPy 2.0;
    # np.prod is the supported, numerically identical spelling.
    particle.setWeight(np.prod(weights))
def turn_90_degrees(noah, est_pose, particles):
    """Turn 90 degrees toward the middle of the landmark rectangle, drive on.

    Chooses clockwise or counter-clockwise so the turn heads toward the
    rectangle's center, rotates the particle cloud to match, then calls
    drive_until_stopped.  Returns (est_pose, particles).
    """
    x_values = [i[0] for i in LANDMARK_POSITIONS.values()]
    y_values = [i[1] for i in LANDMARK_POSITIONS.values()]
    middle = np.array([max(x_values)/2, max(y_values)/2])
    current_position = np.array([est_pose.x, est_pose.y])
    relative_pos = middle - current_position
    # NOTE(review): np.arctan loses the quadrant -- np.arctan2 would be
    # safer; kept as-is pending a hardware test.
    angle = np.arctan(relative_pos[1]/relative_pos[0])
    relative_angle = np.mod(angle - est_pose.theta, 2.0*np.pi)
    # BUG FIX: relative_angle is in radians (mod 2*pi), so the old
    # comparison `> 180` was always False and the robot could never pick
    # the clockwise turn.  Compare against pi instead.
    clockwise = relative_angle > np.pi
    if clockwise:
        noah.go_diff(POWER, POWER, 1, 0)
        time.sleep((90 * TURN_T * CLOCKWISE_OFFSET)/1000)
    else:
        noah.go_diff(POWER, POWER, 0, 1)
        time.sleep((90 * TURN_T)/1000)
    noah.stop()
    # Mirror the physical turn in the particle cloud.
    for p in particles:
        if clockwise:
            p.setTheta(p.theta - np.deg2rad(90))
        else:
            p.setTheta(p.theta + np.deg2rad(90))
    particle.add_uncertainty(particles, SIGMA, SIGMA_THETA)
    _, est_pose, particles = drive_until_stopped(noah, particles)
    return est_pose, particles
def main(noah):
    """Self-localizing rally: visit LANDMARKS in order, then the first again.

    For each landmark: localize with a full look-around, turn toward the
    landmark, drive, and compare actual vs expected drive time.  A large
    mismatch means an obstacle cut the drive short, so the robot turns 90
    degrees toward the course center and retries; otherwise it inches in
    and moves on to the next landmark.
    """
    # Append the first landmark again so the run ends back where it started.
    landmark_order = LANDMARKS + [
        LANDMARKS[0]
    ]
    cam = camera.Camera(0, 'arlo', useCaptureThread=True)
    particles = initialize_particles(NUM_PARTICLES)
    # The estimate of the robots current pose
    est_pose = particle.estimate_pose(particles)
    for landmark in landmark_order:
        print(f"Going to landmark {landmark}")
        while True:
            particles, est_pose = look_around(noah, particles, cam, est_pose)
            print(est_pose)
            particles, est_pose = turn_towards_landmark(noah, particles, est_pose, landmark)
            drive_time = time_to_landmark(est_pose, landmark)
            time_driven, est_pose, particles = drive_until_stopped(noah, particles)
            # More than 0.5 s off the predicted drive time: assume we were
            # blocked, reposition toward the course center and retry.
            if not abs(time_driven - drive_time) < 0.5:
                # drunk_drive(noah)
                est_pose, particles = turn_90_degrees(noah, est_pose, particles)
                continue
            est_pose, particles = inch_closer(noah, particles)
            break
if __name__ == "__main__":
    noah = robot.Robot()  # Noah is the robot's name
    try:
        main(noah)
    except KeyboardInterrupt:
        # Always stop the motors when the run is interrupted with Ctrl-C.
        noah.stop()

View File

@ -1,2 +1 @@
from .robot import Robot
from .arlo import Arlo

View File

@ -3,7 +3,7 @@ import numpy as np
import time
import sys
import threading
import selflocalization.framebuffer as framebuffer
from selflocalization import framebuffer
from pkg_resources import parse_version
@ -18,6 +18,8 @@ except ImportError:
def isRunningOnArlo():
"""Return True if we are running on Arlo, otherwise False."""
# TODO: Problematic that gstreamerCameraFound is first set after
# instantiation of a Camera object
return piCameraFound or gstreamerCameraFound
# Black magic in order to handle differences in namespace names in OpenCV 2.4 and 3.0
@ -98,31 +100,33 @@ class CaptureThread(threading.Thread):
class Camera(object):
"""This class is responsible for doing the image processing. It detects known landmarks and
"""This class is responsible for doing the image processing. It detects known landmarks and
measures distances and orientations to these."""
def __init__(self, camidx, robottype='arlo', useCaptureThread = False):
def __init__(self, camidx, robottype='arlo', useCaptureThread=False):
"""Constructor:
camidx - index of camera
robottype - specify which robot you are using in order to use the correct camera calibration.
Supported types: arlo, frindo, scribbler, macbookpro"""
print("robottype =", robottype)
self.useCaptureThread = useCaptureThread
# Set camera calibration info
if robottype == 'arlo':
self.imageSize = (1280, 720)
focal_length = 1050
self.imageSize = (1024, 720)
#self.intrinsic_matrix = np.asarray([ 7.1305391967046853e+02, 0., 3.1172820723774367e+02, 0.,
# 7.0564929862291285e+02, 2.5634470978315028e+02, 0., 0., 1. ], dtype = np.float64)
#self.intrinsic_matrix = np.asarray([ 6.0727040957659040e+02, 0., 3.0757300398967601e+02, 0.,
# 6.0768864690145904e+02, 2.8935674612358201e+02, 0., 0., 1. ], dtype = np.float64)
self.intrinsic_matrix = np.asarray([500, 0., self.imageSize[0] / 2.0, 0.,
500, self.imageSize[1] / 2.0, 0., 0., 1.], dtype = np.float64)
self.intrinsic_matrix = np.asarray([focal_length, 0., self.imageSize[0] / 2.0, 0.,
focal_length, self.imageSize[1] / 2.0, 0., 0., 1.], dtype = np.float64)
self.intrinsic_matrix.shape = (3, 3)
#self.distortion_coeffs = np.asarray([ 1.1911006165076067e-01, -1.0003366233413549e+00,
# 1.9287903277399834e-02, -2.3728201444308114e-03, -2.8137265581326476e-01 ], dtype = np.float64)
self.distortion_coeffs = np.asarray([0., 0., 2.0546093607192093e-02, -3.5538453075048249e-03, 0.], dtype = np.float64)
if robottype == 'frindo':
elif robottype == 'frindo':
self.imageSize = (640, 480)
#self.intrinsic_matrix = np.asarray([ 7.1305391967046853e+02, 0., 3.1172820723774367e+02, 0.,
# 7.0564929862291285e+02, 2.5634470978315028e+02, 0., 0., 1. ], dtype = np.float64)
@ -457,9 +461,9 @@ class Camera(object):
if (__name__=='__main__'):
print("Opening and initializing camera")
cam = Camera(0, 'macbookpro', useCaptureThread = True)
#cam = Camera(0, 'macbookpro', useCaptureThread=True)
#cam = Camera(0, 'macbookpro', useCaptureThread = False)
#cam = Camera(0, 'arlo', useCaptureThread = True)
cam = Camera(0, robottype='arlo', useCaptureThread=True)
# Open a window
WIN_RF1 = "Camera view"
@ -488,12 +492,12 @@ if (__name__=='__main__'):
#gray = cv2.convertScaleAbs(loggray)
# Detect objects
objectType, distance, angle, colourProb = cam.get_object(colour)
if objectType != 'none':
print("Object type = ", objectType, ", distance = ", distance, ", angle = ", angle, ", colourProb = ", colourProb)
#objectType, distance, angle, colourProb = cam.get_object(colour)
#if objectType != 'none':
# print("Object type = ", objectType, ", distance = ", distance, ", angle = ", angle, ", colourProb = ", colourProb)
# Draw detected pattern
cam.draw_object(colour)
#cam.draw_object(colour)
IDs, dists, angles = cam.detect_aruco_objects(colour)
if not isinstance(IDs, type(None)):

View File

@ -1,6 +1,6 @@
import random
import numpy as np
import selflocalization.random_numbers as rn
from selflocalization import random_numbers as rn
class Particle(object):
"""Data structure for storing particle information (state and weight)"""
@ -34,21 +34,26 @@ class Particle(object):
def setWeight(self, val):
self.weight = val
def copy(self):
return Particle(self.x, self.y, self.theta, self.weight)
def __str__(self) -> str:
return f"({self.x},{self.y},{self.theta})"
def estimate_pose(particles_list):
"""Estimate the pose from particles by computing the average position and orientation over all particles.
This is not done using the particle weights, but just the sample distribution."""
x_sum = 0.0
y_sum = 0.0
cos_sum = 0.0
sin_sum = 0.0
for particle in particles_list:
x_sum += particle.getX()
y_sum += particle.getY()
cos_sum += np.cos(particle.getTheta())
sin_sum += np.sin(particle.getTheta())
x_values = [i.getX() for i in particles_list]
y_values = [i.getY() for i in particles_list]
cos_values = [np.cos(i.getTheta()) for i in particles_list]
sin_values = [np.sin(i.getTheta()) for i in particles_list]
x_sum = sum(x_values)
y_sum = sum(y_values)
cos_sum = sum(cos_values)
sin_sum = sum(sin_values)
flen = len(particles_list)
if flen != 0:
x = x_sum / flen
@ -58,13 +63,14 @@ def estimate_pose(particles_list):
x = x_sum
y = y_sum
theta = 0.0
return Particle(x, y, theta)
def move_particle(particle, delta_x, delta_y, delta_theta):
"""Move the particle by (delta_x, delta_y, delta_theta)"""
print("particle.py: move_particle not implemented. You should do this.")
particle.setX(particle.getX() + delta_x)
particle.setY(particle.getY() + delta_y)
particle.setTheta(particle.getTheta() + delta_theta)
def add_uncertainty(particles_list, sigma, sigma_theta):
@ -83,3 +89,11 @@ def add_uncertainty_von_mises(particles_list, sigma, theta_kappa):
particle.x += rn.randn(0.0, sigma)
particle.y += rn.randn(0.0, sigma)
particle.theta = np.mod(rn.rand_von_mises(particle.theta, theta_kappa), 2.0 * np.pi) - np.pi
def resample(particle_list):
    """Draw a new particle set by weighted sampling with replacement.

    Falls back to uniform weights when every weight is zero (degenerate
    observation), and returns fresh copies so resampled duplicates do not
    share state.
    """
    weights = [p.weight for p in particle_list]
    if sum(weights) == 0:
        uniform = 1/len(particle_list)
        weights = [uniform for _ in particle_list]
    chosen = random.choices(particle_list, weights, k=len(particle_list))
    return [p.copy() for p in chosen]

View File

@ -1,14 +1,14 @@
import cv2
import selflocalization.particle as particle
import selflocalization.camera as camera
import particle as particle
import camera as camera
import numpy as np
import time
from time import sleep
from timeit import default_timer as timer
import sys
# Flags
showGUI = True # Whether or not to open GUI windows
showGUI = False # Whether or not to open GUI windows
onRobot = True # Whether or not we are running on the Arlo robot
@ -20,8 +20,7 @@ def isRunningOnArlo():
if isRunningOnArlo():
# XXX: You need to change this path to point to where your robot.py file is located
sys.path.append("../../../../Arlo/python")
sys.path.append("..")
try:
@ -32,6 +31,14 @@ except ImportError:
onRobot = False
POWER = 70
TURN_T = 7.9 # 1 degree
DRIVE_T = 22 # 1 centimeter
RIGHT_WHEEL_OFFSET = 4
CLOCKWISE_OFFSET = 0.96
# Some color constants in BGR format
@ -46,19 +53,26 @@ CBLACK = (0, 0, 0)
# Landmarks.
# The robot knows the position of 2 landmarks. Their coordinates are in the unit centimeters [cm].
landmarkIDs = [1, 2]
landmarkIDs = [1, 5]
landmarks = {
1: (0.0, 0.0), # Coordinates for landmark 1
2: (300.0, 0.0) # Coordinates for landmark 2
5: (300.0, 0.0) # Coordinates for landmark 2
}
landmark_colors = [CRED, CGREEN] # Colors used when drawing the landmarks
SIGMA = 3
SIGMA_THETA = 0.3
def normal(x, mean, std):
    """Gaussian probability density of `x` under N(mean, std**2)."""
    deviation = x - mean
    exponent = -(deviation * deviation) / (2 * std**2)
    return np.exp(exponent) / (np.sqrt(2 * np.pi) * std)
def jet(x):
"""Colour map for drawing particles. This function determines the colour of
"""Colour map for drawing particles. This function determines the colour of
a particle from its weight."""
r = (x >= 3.0/8.0 and x < 5.0/8.0) * (4.0 * x - 3.0/2.0) + (x >= 5.0/8.0 and x < 7.0/8.0) + (x >= 7.0/8.0) * (-4.0 * x + 9.0/2.0)
g = (x >= 1.0/8.0 and x < 3.0/8.0) * (4.0 * x - 1.0/2.0) + (x >= 3.0/8.0 and x < 5.0/8.0) + (x >= 5.0/8.0 and x < 7.0/8.0) * (-4.0 * x + 7.0/2.0)
@ -90,7 +104,7 @@ def draw_world(est_pose, particles, world):
y = ymax - (int(particle.getY() + offsetY))
colour = jet(particle.getWeight() / max_weight)
cv2.circle(world, (x,y), 2, colour, 2)
b = (int(particle.getX() + 15.0*np.cos(particle.getTheta()))+offsetX,
b = (int(particle.getX() + 15.0*np.cos(particle.getTheta()))+offsetX,
ymax - (int(particle.getY() + 15.0*np.sin(particle.getTheta()))+offsetY))
cv2.line(world, (x,y), b, colour, 2)
@ -102,7 +116,7 @@ def draw_world(est_pose, particles, world):
# Draw estimated robot pose
a = (int(est_pose.getX())+offsetX, ymax-(int(est_pose.getY())+offsetY))
b = (int(est_pose.getX() + 15.0*np.cos(est_pose.getTheta()))+offsetX,
b = (int(est_pose.getX() + 15.0*np.cos(est_pose.getTheta()))+offsetX,
ymax-(int(est_pose.getY() + 15.0*np.sin(est_pose.getTheta()))+offsetY))
cv2.circle(world, a, 5, CMAGENTA, 2)
cv2.line(world, a, b, CMAGENTA, 2)
@ -112,12 +126,61 @@ def draw_world(est_pose, particles, world):
def initialize_particles(num_particles):
particles = []
for i in range(num_particles):
# Random starting points.
# Random starting points.
p = particle.Particle(600.0*np.random.ranf() - 100.0, 600.0*np.random.ranf() - 250.0, np.mod(2.0*np.pi*np.random.ranf(), 2.0*np.pi), 1.0/num_particles)
particles.append(p)
return particles
def dist(particle, landmark):
    """Euclidean distance from `particle` (has .x/.y) to `landmark` ((x, y))."""
    delta = np.array([landmark[0] - particle.x, landmark[1] - particle.y])
    return np.sqrt(delta[0]**2 + delta[1]**2)
def calc_angle(particle, landmark, dist):
    """Signed bearing (radians) from the particle's heading to `landmark`.

    Positive means the landmark lies to the particle's left.  `dist` is the
    precomputed particle-to-landmark distance (used to normalize).
    """
    cos_t, sin_t = np.cos(particle.theta), np.sin(particle.theta)
    heading = np.array([cos_t, sin_t]).T
    left_normal = np.array([-sin_t, cos_t]).T
    to_landmark = np.array([landmark[0]-particle.x, landmark[1]-particle.y]).T / dist
    return np.sign(np.dot(to_landmark, left_normal)) * np.arccos(np.dot(to_landmark, heading))
def calc_weight(particle, landmark_values):
    """Set `particle`'s weight from observed (id, distance, angle) tuples.

    Each observation contributes dist_likelihood * angle_likelihood under
    the SIGMA / SIGMA_THETA measurement model; the particle weight becomes
    the product over all observations.  Leaves the weight untouched when
    there are no observations.
    """
    if landmark_values == []:
        return
    weights = []
    for values in landmark_values:
        dist_to_landmark = dist(particle, landmarks[values[0]])
        dist_weight = normal(values[1], dist_to_landmark, SIGMA)
        angle_to_landmark = calc_angle(particle, landmarks[values[0]], dist_to_landmark)
        angle_weight = normal(values[2], angle_to_landmark, SIGMA_THETA)
        weights.append(dist_weight * angle_weight)
    # BUG FIX: np.product was deprecated and removed in NumPy 2.0;
    # np.prod is the supported, numerically identical spelling.
    particle.setWeight(np.prod(weights))
def drive_towards_middle(est_pose, arlo):
    """Turn toward the point (150, 0) between the two landmarks and drive.

    Uses the estimated pose to pick the shorter turn direction, then drives
    a dead-reckoned distance.
    """
    middle = np.array([150, 0])
    position = np.array([est_pose.x, est_pose.y])
    theta = est_pose.theta
    print(np.rad2deg(theta))
    relative_pos = middle - position
    print("relative_pos", relative_pos)
    # NOTE(review): np.arctan loses the quadrant and divides by zero when
    # relative_pos[0] == 0 -- np.arctan2 would be safer; confirm.
    angle = np.rad2deg(np.arctan(relative_pos[1]/relative_pos[0]))
    print(angle)
    turn_angle = np.mod(angle - (np.rad2deg(theta) - 180), 360)
    print(turn_angle)
    # NOTE(review): this is the distance from the ORIGIN, not the distance
    # to `middle` (that would be the norm of relative_pos) -- confirm intent.
    drive_distance = np.sqrt(position[0]**2 + position[1]**2)
    if turn_angle < 180:
        # Counter-clockwise is the shorter way round.
        arlo.go_diff(POWER, POWER, 0, 1)
        sleep((abs(turn_angle) * TURN_T)/1000)
        arlo.stop()
    else:
        # Clockwise, scaled by the clockwise calibration factor.
        arlo.go_diff(POWER, POWER, 1, 0)
        sleep((abs(360 - turn_angle) * TURN_T * CLOCKWISE_OFFSET)/1000)
        arlo.stop()
    arlo.go_diff(POWER, POWER + RIGHT_WHEEL_OFFSET, 1, 1)
    sleep((drive_distance * DRIVE_T)/1000)
    arlo.stop()
# Main program #
try:
@ -133,7 +196,7 @@ try:
# Initialize particles
num_particles = 1000
num_particles = 10000
particles = initialize_particles(num_particles)
est_pose = particle.estimate_pose(particles) # The estimate of the robots current pose
@ -142,7 +205,7 @@ try:
velocity = 0.0 # cm/sec
angular_velocity = 0.0 # radians/sec
# Initialize the robot (XXX: You do this)
arlo = robot.Robot()
# Allocate space for world map
world = np.zeros((500,500,3), dtype=np.uint8)
@ -156,75 +219,71 @@ try:
else:
cam = camera.Camera(0, 'macbookpro', useCaptureThread = True)
while True:
# Move the robot according to user input (only for testing)
action = cv2.waitKey(10)
if action == ord('q'): # Quit
break
if not isRunningOnArlo():
if action == ord('w'): # Forward
velocity += 4.0
elif action == ord('x'): # Backwards
velocity -= 4.0
elif action == ord('s'): # Stop
velocity = 0.0
angular_velocity = 0.0
elif action == ord('a'): # Left
angular_velocity += 0.2
elif action == ord('d'): # Right
angular_velocity -= 0.2
# Use motor controls to update particles
# XXX: Make the robot drive
# XXX: You do this
# Fetch next frame
colour = cam.get_next_frame()
landmark_values = []
# Detect objects
objectIDs, dists, angles = cam.detect_aruco_objects(colour)
if not isinstance(objectIDs, type(None)):
# List detected objects
for i in range(len(objectIDs)):
print("Object ID = ", objectIDs[i], ", Distance = ", dists[i], ", angle = ", angles[i])
# XXX: Do something for each detected object - remember, the same ID may appear several times
if objectIDs[i] in landmarkIDs:
landmark_values.append((objectIDs[i], dists[i], angles[i]))
# Compute particle weights
# XXX: You do this
for p in particles:
calc_weight(p, landmark_values)
# Resampling
# XXX: You do this
particles = particle.resample(particles)
# Draw detected objects
cam.draw_aruco_objects(colour)
else:
# No observation - reset weights to uniform distribution
for p in particles:
p.setWeight(1.0/num_particles)
particle.add_uncertainty(particles, SIGMA, SIGMA_THETA)
est_pose = particle.estimate_pose(particles) # The estimate of the robots current pose
calc_weight(est_pose, landmark_values)
est_weight = est_pose.weight * 10**5
print("est_weight:", est_weight)
if est_weight > 1:
draw_world(est_pose, particles, world)
cv2.imwrite("test.png", world)
break
# else:
# arlo.go_diff(POWER, POWER, 0, 1)
# sleep((15 * TURN_T)/1000)
# arlo.stop()
# for p in particles:
# p.setTheta(p.theta + np.deg2rad(15))
if showGUI:
# Draw map
draw_world(est_pose, particles, world)
# Show frame
cv2.imshow(WIN_RF1, colour)
# Show world
cv2.imshow(WIN_World, world)
finally:
print(est_pose.x, est_pose.y, est_pose.theta)
drive_towards_middle(est_pose, arlo)
finally:
# Make sure to clean up even if an exception occurred
# Close all windows
cv2.destroyAllWindows()