Commit ff17c03

Added Indoor-Navigation
1 parent ac82671 commit ff17c03

20 files changed: +1423 -0 lines changed
@@ -0,0 +1,72 @@
import cv2 as cv
import os

CHESS_BOARD_DIM = (9, 6)

n = 0  # image counter

# Check whether the images directory exists; if not, create it.
image_dir_path = "images"

CHECK_DIR = os.path.isdir(image_dir_path)
if not CHECK_DIR:
    os.makedirs(image_dir_path)
    print(f'"{image_dir_path}" directory created')
else:
    print(f'"{image_dir_path}" directory already exists')

# Termination criteria for the sub-pixel corner refinement.
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)


def detect_checker_board(image, grayImage, criteria, boardDimension):
    ret, corners = cv.findChessboardCorners(grayImage, boardDimension)
    if ret:
        corners1 = cv.cornerSubPix(grayImage, corners, (3, 3), (-1, -1), criteria)
        image = cv.drawChessboardCorners(image, boardDimension, corners1, ret)

    return image, ret


cap = cv.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    copyFrame = frame.copy()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    image, board_detected = detect_checker_board(frame, gray, criteria, CHESS_BOARD_DIM)
    cv.putText(
        frame,
        f"saved_img : {n}",
        (30, 40),
        cv.FONT_HERSHEY_PLAIN,
        1.4,
        (0, 255, 0),
        2,
        cv.LINE_AA,
    )

    cv.imshow("frame", frame)
    cv.imshow("copyFrame", copyFrame)

    key = cv.waitKey(1)

    if key == ord("q"):
        break
    if key == ord("s") and board_detected:
        # Store the unannotated copy of the frame for calibration.
        cv.imwrite(f"{image_dir_path}/image{n}.png", copyFrame)
        print(f"saved image number {n}")
        n += 1  # increment the image counter

cap.release()
cv.destroyAllWindows()

print("Total saved images:", n)
@@ -0,0 +1,87 @@
import cv2 as cv
import os
import numpy as np

# Checkerboard size (inner corners per row and column).
CHESS_BOARD_DIM = (9, 6)

# The size of a square in the checkerboard.
SQUARE_SIZE = 14  # millimeters

# Termination criteria for the sub-pixel corner refinement.
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)


calib_data_path = "../calib_data"
CHECK_DIR = os.path.isdir(calib_data_path)

if not CHECK_DIR:
    os.makedirs(calib_data_path)
    print(f'"{calib_data_path}" directory created')
else:
    print(f'"{calib_data_path}" directory already exists')

# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (8,5,0).
obj_3D = np.zeros((CHESS_BOARD_DIM[0] * CHESS_BOARD_DIM[1], 3), np.float32)
obj_3D[:, :2] = np.mgrid[0 : CHESS_BOARD_DIM[0], 0 : CHESS_BOARD_DIM[1]].T.reshape(
    -1, 2
)
obj_3D *= SQUARE_SIZE  # scale to real-world millimeters
print(obj_3D)

# Arrays to store object points and image points from all the images.
obj_points_3D = []  # 3D points in real-world space
img_points_2D = []  # 2D points in the image plane

# The images directory path
image_dir_path = "images"

files = os.listdir(image_dir_path)
for file in files:
    print(file)
    imagePath = os.path.join(image_dir_path, file)

    image = cv.imread(imagePath)
    grayScale = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    ret, corners = cv.findChessboardCorners(image, CHESS_BOARD_DIM, None)
    if ret:
        obj_points_3D.append(obj_3D)
        corners2 = cv.cornerSubPix(grayScale, corners, (3, 3), (-1, -1), criteria)
        img_points_2D.append(corners2)

        img = cv.drawChessboardCorners(image, CHESS_BOARD_DIM, corners2, ret)

cv.destroyAllWindows()

ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(
    obj_points_3D, img_points_2D, grayScale.shape[::-1], None, None
)
print("calibrated")

print("dumping the data into one file using numpy")
np.savez(
    f"{calib_data_path}/MultiMatrix",
    camMatrix=mtx,
    distCoef=dist,
    rVector=rvecs,
    tVector=tvecs,
)

print("-------------------------------------------")

print("loading data stored using the numpy savez function\n")

data = np.load(f"{calib_data_path}/MultiMatrix.npz")

camMatrix = data["camMatrix"]
distCof = data["distCoef"]
rVector = data["rVector"]
tVector = data["tVector"]

print("loaded calibration data successfully")
@@ -0,0 +1,107 @@
import cv2 as cv
from cv2 import aruco
import numpy as np

calib_data_path = "../calib_data/MultiMatrix.npz"
calib_data = np.load(calib_data_path)
print(calib_data.files)
cam_mat = calib_data["camMatrix"]
dist_coef = calib_data["distCoef"]
r_vectors = calib_data["rVector"]
t_vectors = calib_data["tVector"]

MARKER_SIZE = 8  # centimeters
marker_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
param_markers = aruco.DetectorParameters()
# Build the detector once (OpenCV 4.7+ ArUco API) instead of per frame.
detector = cv.aruco.ArucoDetector(marker_dict, param_markers)
cap = cv.VideoCapture(0)


def my_estimatePoseSingleMarkers(corners, marker_size):
    # Replacement for aruco.estimatePoseSingleMarkers(), removed in newer
    # OpenCV versions: solve a planar PnP problem for each detected marker.
    marker_points = np.array(
        [
            [-marker_size / 2, marker_size / 2, 0],
            [marker_size / 2, marker_size / 2, 0],
            [marker_size / 2, -marker_size / 2, 0],
            [-marker_size / 2, -marker_size / 2, 0],
        ],
        dtype=np.float32,
    )
    trash = []
    rvecs = []
    tvecs = []
    for c in corners:
        nada, R, t = cv.solvePnP(
            marker_points, c, cam_mat, dist_coef, False, cv.SOLVEPNP_IPPE_SQUARE
        )
        rvecs.append(R)
        tvecs.append(t)
        trash.append(nada)
    return rvecs, tvecs, trash


while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    marker_corners, marker_IDs, rejected = detector.detectMarkers(gray_frame)
    if marker_corners:
        rVec, tVec, trash = my_estimatePoseSingleMarkers(marker_corners, MARKER_SIZE)
        total_markers = range(0, marker_IDs.size)
        for ids, corners, i in zip(marker_IDs, marker_corners, total_markers):
            cv.polylines(
                frame, [corners.astype(np.int32)], True, (0, 255, 255), 4, cv.LINE_AA
            )
            corners = corners.reshape(4, 2)
            corners = corners.astype(int)
            top_right = tuple(corners[0].ravel())
            top_left = tuple(corners[1].ravel())
            bottom_right = tuple(corners[2].ravel())
            bottom_left = tuple(corners[3].ravel())
            print(tVec[i][2])

            # The distance formula used in the original video tutorial was
            # wrong (as pointed out in the tutorial's comments); using the
            # full Euclidean norm of the translation vector, tested here,
            # improves the overall accuracy.
            distance = np.sqrt(
                tVec[i][2][0] ** 2 + tVec[i][0][0] ** 2 + tVec[i][1][0] ** 2
            )
            # Draw the pose of the marker.
            point = cv.drawFrameAxes(frame, cam_mat, dist_coef, rVec[i], tVec[i], 4, 4)
            cv.putText(
                frame,
                f"id: {ids[0]} Dist: {round(distance, 2)}",
                top_right,
                cv.FONT_HERSHEY_PLAIN,
                1.3,
                (0, 0, 255),
                2,
                cv.LINE_AA,
            )
            cv.putText(
                frame,
                f"x:{round(tVec[i][0][0], 1)} y: {round(tVec[i][1][0], 1)} ",
                bottom_right,
                cv.FONT_HERSHEY_PLAIN,
                1.0,
                (0, 0, 255),
                2,
                cv.LINE_AA,
            )
    cv.imshow("frame", frame)
    key = cv.waitKey(1)
    if key == ord("q"):
        break
cap.release()
cv.destroyAllWindows()
@@ -0,0 +1,121 @@
# Indoor Navigation using ArUco Markers

## Description

In our project, we employed a TurtleBot for autonomous indoor navigation based on marker-based localization with ArUco markers. These markers act as crucial reference points for the robot's autonomous movement within unfamiliar indoor spaces. The approach is cost-effective, requiring only the deployment of marker patterns and protective measures. We also made use of ROS, OpenCV, and Gazebo simulation, enabling us to navigate, control, and process data for various robotic applications, such as warehouse management.

## Methodology

- **Turtlesim**

We performed simple tasks on the turtlesim node to gain practical knowledge of ROS: how to write code in ROS and how to implement different publisher and subscriber nodes. A minimal sketch follows the tables below.

| <img src="https://github.com/shreyaskkk12/Indoor-Navigation-IvLabs/assets/128238705/0b62366a-62af-420b-8637-6cbe254db07c" /> | <img src="https://github.com/shreyaskkk12/Indoor-Navigation-IvLabs/assets/128238705/ac06461e-3083-4cb3-99f0-d0db776115a3" /> |
| :--: | :--: |
| Circle | Spiral |

| <img src="https://github.com/shreyaskkk12/Indoor-Navigation-IvLabs/assets/128238705/829176c4-1675-45ef-821f-0036aa4a9588" /> | <img src="https://github.com/shreyaskkk12/Indoor-Navigation-IvLabs/assets/128238705/cf1a9fab-80ef-4ff1-b149-fcc0a4eb88d7" /> |
| :--: | :--: |
| Go To Goal | Follower |
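As a flavor of these exercises, here is a minimal sketch of a publisher that drives the turtle in a circle; the node and topic names follow turtlesim defaults, and the speeds are illustrative rather than the exact values our scripts used.

```python
#!/usr/bin/env python3
# Minimal sketch: drive the turtlesim turtle in a circle by publishing a
# constant twist. Assumes roscore and `rosrun turtlesim turtlesim_node`
# are already running.
import rospy
from geometry_msgs.msg import Twist

rospy.init_node("circle_driver")
pub = rospy.Publisher("/turtle1/cmd_vel", Twist, queue_size=10)
rate = rospy.Rate(10)  # publish at 10 Hz

msg = Twist()
msg.linear.x = 2.0   # constant forward speed
msg.angular.z = 1.0  # constant turn rate -> circular path

while not rospy.is_shutdown():
    pub.publish(msg)
    rate.sleep()
```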
- **SLAM - Hector and G-mapping**

Hector SLAM and Gmapping are both algorithms for mapping and localization in robotics. Hector SLAM is a laser-based algorithm suited only to indoor use, since the maps it creates are small, while Gmapping can be used both indoors and outdoors. Hector SLAM is more efficient than Gmapping at drawing maps, but it has a limit on the size of the map it can create; Gmapping has no such limit and is the more versatile of the two.

| <img src="https://github.com/shreyaskkk12/Indoor-Navigation-IvLabs/assets/128238705/460920a5-6d15-4d2b-b6ef-1b3d86b5347e" /> | <img src="https://github.com/shreyaskkk12/Indoor-Navigation-IvLabs/assets/128238705/46dbf6e9-b57e-4bb1-a68d-d7ee38f8ce56" /> |
| :--: | :--: |
| Hector Mapping | GMapping |
- **Camera Calibration**

The process of estimating the parameters of a camera is called camera calibration. It recovers two kinds of parameters:

1. **Internal parameters** of the camera/lens system, e.g. focal length, optical center, and radial distortion coefficients of the lens.
2. **External parameters**: the orientation (rotation and translation) of the camera with respect to some world coordinate system.

We used a checkerboard for the calibration of the camera, as its pattern is easy to detect in an image and its corners have a sharp gradient in two directions, which makes them easy to locate precisely. A sketch of how the saved calibration is reused appears after the image below.

![Callibration](https://github.com/shreyaskkk12/Indoor-Navigation-IvLabs/assets/128238705/425b5acf-4fb0-4b32-b460-c4220e20e297)
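To show how the saved calibration is reused, here is a minimal sketch that loads MultiMatrix.npz and undistorts one of the captured images; the file paths and sample image name are illustrative.

```python
# Minimal sketch: load the saved calibration and undistort an image.
# Paths assume the layout used by the scripts in this commit.
import cv2 as cv
import numpy as np

data = np.load("../calib_data/MultiMatrix.npz")
cam_matrix = data["camMatrix"]
dist_coef = data["distCoef"]

img = cv.imread("images/image0.png")
h, w = img.shape[:2]
# Refine the camera matrix for this image size, then remove lens distortion.
new_cam, roi = cv.getOptimalNewCameraMatrix(cam_matrix, dist_coef, (w, h), 1, (w, h))
undistorted = cv.undistort(img, cam_matrix, dist_coef, None, new_cam)
cv.imwrite("undistorted.png", undistorted)
```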
- **ArUco Detection and Pose Estimation**

We used OpenCV's aruco module for the detection and pose estimation of ArUco markers (the detection script appears earlier in this commit). A sketch for generating the printable markers follows the image below.

![ArucoDetection](https://github.com/shreyaskkk12/Indoor-Navigation-IvLabs/assets/128238705/e2f04d65-2a48-43ef-8ab9-ec74e880b834)
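As a companion to detection, here is a minimal sketch that generates a printable marker from the same DICT_4X4_50 dictionary; the marker ID and pixel size are illustrative, and `generateImageMarker` is the OpenCV 4.7+ name for this call.

```python
# Minimal sketch: render a 4x4 ArUco marker to a PNG for printing.
import cv2 as cv
from cv2 import aruco

marker_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
marker_id = 0      # illustrative ID from the 50 available in this dictionary
side_pixels = 400  # output resolution of the square marker image

marker_img = aruco.generateImageMarker(marker_dict, marker_id, side_pixels)
cv.imwrite(f"marker_{marker_id}.png", marker_img)
```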
- **Gazebo Simulation**

We used two nodes for navigation. The camera node detects the markers and publishes data about them, such as their IDs and distances. The nav node subscribes to this data and publishes velocity commands to the TurtleBot. We used TurtleBot's 'waffle_pi' model for the simulation, as it comes with a camera; to receive the camera feed, we created a subscriber in the camera node.

Working: the nav node publishes velocity commands so that the TurtleBot moves in a square loop until it finds a marker. After finding a marker, the bot performs the task assigned to that ID. We assigned a simple task to each ID: the bot approaches the marker up to a certain distance and then resumes searching for the next marker. A sketch of the camera-node side follows the animation below.

![ezgif com-video-to-gif](https://github.com/shreyaskkk12/Indoor-Navigation-IvLabs/assets/128238705/a21c4525-08ee-471b-933f-c309023ac8c0)
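Here is a minimal sketch of the camera-node side; the camera topic matches the usual waffle_pi default, and the `/marker_info` topic and message format are assumptions standing in for whatever the real node publishes (marker IDs and distances) for the nav node.

```python
#!/usr/bin/env python3
# Minimal sketch of the camera node: subscribe to the simulated camera
# topic, convert each frame with cv_bridge, and detect ArUco markers.
import rospy
import cv2 as cv
from cv2 import aruco
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge

rospy.init_node("cam_node")
bridge = CvBridge()
marker_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
detector = cv.aruco.ArucoDetector(marker_dict, aruco.DetectorParameters())
pub = rospy.Publisher("/marker_info", String, queue_size=10)


def image_cb(msg):
    # Convert the ROS image message to an OpenCV BGR frame.
    frame = bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    corners, ids, _ = detector.detectMarkers(gray)
    if ids is not None:
        # Publish the detected IDs for the nav node to act on.
        pub.publish(String(data=str(ids.ravel().tolist())))


rospy.Subscriber("/camera/rgb/image_raw", Image, image_cb)
rospy.spin()
```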
- **Implementation on Hardware**

Our TurtleBot model was "burger", which requires ROS Melodic, so we used Docker to create a ROS Melodic workspace. We also had to change our cam node so that it takes video input from a webcam instead of from a subscriber, roughly as sketched below.
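For that change, the image-topic subscriber is replaced by a direct `cv.VideoCapture` read, roughly like this (the camera index is an assumption):

```python
# Minimal sketch of the hardware-side change: the camera node reads
# frames directly from a webcam instead of subscribing to an image topic.
import cv2 as cv

cap = cv.VideoCapture(0)  # replaces the rospy.Subscriber + cv_bridge path
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # ...run the same ArUco detection on `frame` as in the camera node...
    if cv.waitKey(1) == ord("q"):
        break
cap.release()
```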
## Requirements

Software:

- Ubuntu 20.04
- Python3
- ROS-Noetic
- Docker Image for ROS-Melodic
- OpenCV 4.8.x (Noetic)
- OpenCV 4.2.x (Melodic)
- Gazebo simulation package

Hardware:

- TurtleBot Burger Model [Kobuki]
- Webcam
- YdLidar
## How to use the Project

### <a name="_q09s3zjlqn8o"></a>**For Simulation**

1. Create a catkin package:
   1. Go to `cd ~/catkin_ws/src`
   1. Do `catkin_create_pkg indoor_nav rospy turtlesim geometry_msgs sensor_msgs std_msgs`
   1. Go back to `cd ~/catkin_ws`
   1. Do `catkin_make`
1. Put camFeed.py, velcmds.py, and markers.launch (or any Gazebo world) in your catkin package and make them executable.
1. In your terminal, run the following commands:
   1. `roslaunch indoor_nav markers.launch`
   1. `rosrun indoor_nav cam.py`
   1. `rosrun indoor_nav navi.py`
### <a name="_wwhunr977nm1"></a>**For Hardware**

1. Create a catkin package in **both** your Docker container (ROS-Melodic) and your main device (ROS-Noetic):
   1. Go to `cd ~/catkin_ws/src`
   1. Do `catkin_create_pkg indoor_nav rospy turtlesim geometry_msgs sensor_msgs std_msgs`
   1. Go back to `cd ~/catkin_ws`
   1. Do `catkin_make`
1. Put the lidar_data.py code in your ROS-Noetic package, and put camera.py, MultiMatrix.npz, and navi.py as well as a lidar_data.txt file in your ROS-Melodic package; make them all executable.
1. Make sure to change calib_data_path in camera.py to the MultiMatrix.npz path. Also, change lidar_data_path in lidar.py to the path of lidar_data.txt relative to your ROS-Noetic terminal, and in navi.py to the path relative to your container.
1. Connect your TurtleBot to your device and run the following in your container:
   1. `bash .devcontainer/post_create_commands.sh`
   1. `roslaunch turtlebot_bringup minimal.launch`
1. In your ROS-Noetic terminal, run:
   1. `roscore`
   1. `rosrun indoor_nav lidar_data.py`
1. In your container, run:
   1. `roscore`
   1. `rosrun indoor_nav camera.py`
   1. `rosrun indoor_nav navi.py`
## Results

![output](assets/output.gif)
