diff --git a/.gitignore b/.gitignore
index b6e4761..9aa6846 100644
--- a/.gitignore
+++ b/.gitignore
@@ -127,3 +127,6 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
+
+# weights
+*.weights
\ No newline at end of file
diff --git a/Code/Utils.py b/Code/Utils.py
index 2a157e3..b0a6c9a 100644
--- a/Code/Utils.py
+++ b/Code/Utils.py
@@ -5,10 +5,10 @@
 
 
 def load_data(data_dir):
-    image_files = sorted(glob.glob(data_dir+"//images//*.png"))
-    point_files = sorted(glob.glob(data_dir+"//points//*.pcd"))
-    label_files = sorted(glob.glob(data_dir+"//labels//*.txt"))
-    calib_files = sorted(glob.glob(data_dir+"//calibs//*.txt"))
+    image_files = sorted(glob.glob(data_dir+"/images/*.png"))
+    point_files = sorted(glob.glob(data_dir+"/points/*.pcd"))
+    label_files = sorted(glob.glob(data_dir+"/labels/*.txt"))
+    calib_files = sorted(glob.glob(data_dir+"/calibs/*.txt"))
 
     return image_files, point_files, label_files, calib_files
 
diff --git a/Code/YoloDetector.py b/Code/YoloDetector.py
index f28ff1e..35c84ca 100644
--- a/Code/YoloDetector.py
+++ b/Code/YoloDetector.py
@@ -18,7 +18,7 @@ def __init__(self, conf_threshold=0.4, classes_to_detect=None, nms_threshold=0.4
 
     def get_layers(self):
         ln = self.model.getLayerNames()
-        return [ln[i[0] - 1] for i in self.model.getUnconnectedOutLayers()]
+        return [ln[i - 1] for i in self.model.getUnconnectedOutLayers()]
 
     def read_names(self, names_path):
         f = open(names_path, "r")
@@ -96,4 +96,4 @@ def detect(self, image, draw_bboxes=False, display_labels=False):
 
             detections.append([class_id, bbox, conf])
 
-        return np.array(detections), img
\ No newline at end of file
+        return np.array(detections, dtype='object'), img
\ No newline at end of file
diff --git a/Code/main.py b/Code/main.py
index ef7e2c3..ea79fbf 100644
--- a/Code/main.py
+++ b/Code/main.py
@@ -16,7 +16,7 @@ def low_level_fusion(data_dir, show_random_pcl=False, display_video=True, save_
     imgs, pts, lbls, calbs = ut.load_data(data_dir)
 
     if save_video:
-        out_dir = os.path.join(data_dir, "output//videos")
+        out_dir = os.path.join(data_dir, "output/videos")
         if not os.path.exists(out_dir):
             os.makedirs(out_dir)
 
@@ -26,37 +26,25 @@
         o3d.visualization.draw_geometries([pcd])
 
     lidar2cam = l2c.LiDAR2Camera(calbs[0])
-    print("P :"+str(lidar2cam.P))
-    print("-")
-    print("RO "+str(lidar2cam.R0))
-    print("-")
-    print("Velo 2 Cam " +str(lidar2cam.V2C))
-
-    video_images = sorted(glob.glob(data_dir+"//test//video4//images/*.png"))
-    video_points = sorted(glob.glob(data_dir+"//test//video4//points/*.pcd"))
 
     result_video = []
 
-    weights = data_dir + "//model//yolov4//yolov4.weights"
-    config = data_dir + "//model//yolov4//yolov4.cfg"
-    names = data_dir + "//model//yolov4//coco.names"
+    weights = data_dir + "/model/yolov4/yolov4.weights"
+    config = data_dir + "/model/yolov4/yolov4.cfg"
+    names = data_dir + "/model/yolov4/coco.names"
 
     detector = yd.Detector(0.4)
     detector.load_model(weights, config, names)
 
-    image = cv2.imread(video_images[0])
-
-    if display_video:
-        cv2.namedWindow("fused_result", cv2.WINDOW_KEEPRATIO)
-
-    for idx, img_path in enumerate(video_images):
+    for idx, img_path in enumerate(imgs):
         image = cv2.imread(img_path)
         detections, image = detector.detect(image, True, True)
-        point_cloud = np.asarray(o3d.io.read_point_cloud(video_points[idx]).points)
-        # image = lu.display_lidar_on_image(lidar2cam, point_cloud, image)
+        point_cloud = np.asarray(o3d.io.read_point_cloud(pts[idx]).points)
+        lidar2img = lu.display_lidar_on_image(lidar2cam, point_cloud, image)
         pts_3D, pts_2D = lu.get_lidar_on_image(lidar2cam, point_cloud, (image.shape[1], image.shape[0]))
         image, _ = fu.lidar_camera_fusion(pts_3D, pts_2D, detections, image)
         if display_video:
+            cv2.imshow("projected_result", lidar2img)
             cv2.imshow("fused_result", image)
             cv2.waitKey(10)
         if save_video:
@@ -75,20 +63,17 @@ def mid_level_fusion(data_dir, index=0, display_image=True, save_image=False):
     imgs, pts, labels, calibs = ut.load_data(data_dir)
 
     if save_image:
-        out_dir = os.path.join(data_dir, "output//images")
+        out_dir = os.path.join(data_dir, "output/images")
         if not os.path.exists(out_dir):
             os.makedirs(out_dir)
 
-    weights = data_dir + "//model//yolov4//yolov4.weights"
-    config = data_dir + "//model//yolov4//yolov4.cfg"
-    names = data_dir + "//model//yolov4//coco.names"
+    weights = data_dir + "/model/yolov4/yolov4.weights"
+    config = data_dir + "/model/yolov4/yolov4.cfg"
+    names = data_dir + "/model/yolov4/coco.names"
 
     detector = yd.Detector(0.4)
     detector.load_model(weights, config, names)
 
-    if display_image:
-        cv2.namedWindow("fused_result", cv2.WINDOW_KEEPRATIO)
-
     """PIPELINE STARTS FROM HERE"""
 
     # load the image
@@ -98,7 +83,7 @@
     lidar2cam = l2c.LiDAR2Camera(calibs[index])
 
     # 1 - Run 2D object detection on image
-    detections, yolo_detections = detector.detect(image, draw_bboxes=False, display_labels=False)
+    detections, yolo_detections = detector.detect(image, draw_bboxes=True, display_labels=True)
 
     # load lidar points and project them inside 2d detection
     point_cloud = np.asarray(o3d.io.read_point_cloud(pts[index]).points)
@@ -131,7 +116,7 @@
         cv2.imshow("lidar_2d", lidar_2d)
         cv2.imshow("yolo_detections", yolo_detections)
         cv2.imshow("lidar_pts_img", lidar_pts_img)
-        cv2.imshow("final_image", final_image)
+        cv2.imshow("fused_result", final_image)
         cv2.waitKey(0)
     if save_image:
         cv2.imwrite(os.path.join(out_dir,"fused_result.png"), final_image)
@@ -140,6 +125,6 @@
 
 if __name__ == "__main__":
 
-    data_dir = "..//Data//"
+    data_dir = "../Data/"
     # low_level_fusion(data_dir, show_random_pcl=False, display_video=True, save_video=False)
     mid_level_fusion(data_dir, index=0, display_image=True, save_image=False)
\ No newline at end of file
diff --git a/README.md b/README.md
index e41c2d7..1dc418c 100644
--- a/README.md
+++ b/README.md
@@ -60,6 +60,25 @@
 * Green Bounding Boxes are detected by YOlO whereas Blue Bounding Boxes are calculated using LiDAR points
 * YOLO missed 1 vehicle, whereas 2 vehicles are missed by LiDAR, one of which is half out of frame, at the bottom right side
 
+## Usage
+
+**Installation**
+
+```bash
+git clone git@github.com:mjoshi07/Visual-Sensor-Fusion.git
+cd Visual-Sensor-Fusion
+pip install -r requirements.txt  # installs numpy, open3d, opencv, etc.
+```
+
+**Run**
+
+You can choose low-level or mid-level fusion and visualization options in [main.py](./Code/main.py).
+
+```bash
+cd Code
+python main.py
+```
+
 ## File Structure
     .
     ├── Code
@@ -97,7 +115,7 @@
 | ├── ...
 
 ### TODO
-- [ ] Add Run Instructions
-- [ ] Add Dependencies
+- [x] Add Run Instructions
+- [x] Add Dependencies
 - [ ] Add References
 - [ ] High-Level Fusion
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..0f215b0
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+matplotlib==3.7.2
+numpy==1.24.4
+open3d==0.13.0
+opencv_python==4.8.0.74
+scipy==1.11.3
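The `get_layers` change in `Code/YoloDetector.py` tracks an upstream API change: `cv2.dnn_Net.getUnconnectedOutLayers()` returned an Nx1 array before OpenCV 4.5.4 and returns a flat 1-D array from 4.5.4 onward, which is what the `opencv_python==4.8.0.74` pin provides. For reference, a minimal version-agnostic sketch; the function name and the `net` parameter are illustrative, not part of the patch:

```python
import numpy as np

def get_output_layer_names(net):
    # OpenCV < 4.5.4 returns an Nx1 array from getUnconnectedOutLayers(),
    # while 4.5.4+ returns a flat 1-D array; np.ravel() normalizes both
    # shapes. Layer indices are 1-based, hence the "- 1".
    layer_names = net.getLayerNames()
    return [layer_names[i - 1] for i in np.ravel(net.getUnconnectedOutLayers())]
```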
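The `dtype='object'` change in `detect()` addresses a NumPy behavior change: each detection row mixes a scalar class id, a 4-element bbox list, and a confidence, and NumPy 1.20 deprecated building such inhomogeneous arrays implicitly, while NumPy 1.24 (the pinned version) raises a `ValueError` instead. A small illustration with made-up values:

```python
import numpy as np

# Hypothetical rows in the [class_id, bbox, conf] layout that detect() builds.
detections = [[0, [10, 20, 50, 80], 0.91],
              [2, [100, 40, 60, 120], 0.77]]

# Without dtype=object, NumPy >= 1.24 raises
# "ValueError: setting an array element with a sequence ...".
arr = np.array(detections, dtype="object")
print(arr.shape)  # (2, 3) -- the bbox lists are kept as Python objects
```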