feat(pointpainting multiframe): add multi-sweep pointpainting #1462

Closed
Changes from all commits (29 commits):
- 2696be3 pointpainting_fusion-paint points (tzhong518, Apr 11, 2022)
- 41681d0 pointpainting_fusion:run the model (tzhong518, Apr 13, 2022)
- 9738802 fix incorrect projection (tzhong518, Apr 20, 2022)
- bc80fca update pointpainting_fusion using centerpoint as shared library (tzhong518, Apr 26, 2022)
- 4b55948 pointpainting_fusion: fix uncommited node.cpp and node.hpp (tzhong518, Apr 26, 2022)
- 2745511 fix: update the class (tzhong518, May 24, 2022)
- b213e54 fix: update the class (tzhong518, May 24, 2022)
- d4d6363 fix: small corrections (tzhong518, May 27, 2022)
- f5e7652 fix: correctionts to use centerpoint_trt.hpp (tzhong518, May 30, 2022)
- 004406d ci(pre-commit): autofix (pre-commit-ci[bot], May 30, 2022)
- 17522e0 Merge branch 'autowarefoundation:main' into feature/pointpainting_fusion (tzhong518, May 31, 2022)
- 6e65439 fix: resolve conversations (tzhong518, Jun 14, 2022)
- f5480d1 ci(pre-commit): autofix (pre-commit-ci[bot], Jun 14, 2022)
- 47c6d84 fix: resolve conversations (tzhong518, Jun 15, 2022)
- 5915e61 Merge branch 'feature/pointpainting_fusion' of github.com:tzhong518/a… (tzhong518, Jun 15, 2022)
- 1d08963 ci(pre-commit): autofix (pre-commit-ci[bot], Jun 15, 2022)
- 0b2c52a fix: remove magic number (tzhong518, Jun 15, 2022)
- 6d9fd01 Merge branch 'feature/pointpainting_fusion' of github.com:tzhong518/a… (tzhong518, Jun 15, 2022)
- c6bc2a4 fix: remove magic number7 (tzhong518, Jun 15, 2022)
- 190f7e8 fix: fix Cmakelist, doc, variable name and default value (tzhong518, Jun 21, 2022)
- 3ecfaff fix: CI build&precommit (tzhong518, Jun 24, 2022)
- 5f25bc2 Merge branch 'autowarefoundation:main' into feature/pointpainting_fusion (tzhong518, Jun 24, 2022)
- 79edef9 fix: pre-commit-optional&build(humble) (tzhong518, Jun 24, 2022)
- a670258 fix: move guard to publish() (tzhong518, Jun 28, 2022)
- 5b97edc ci(pre-commit): autofix (pre-commit-ci[bot], Jun 28, 2022)
- 08b82b5 Merge branch 'autowarefoundation:main' into feature/pointpainting_fusion (tzhong518, Jun 29, 2022)
- f20cab5 Merge branch 'autowarefoundation:main' into feature/pointpainting_fusion (tzhong518, Jun 29, 2022)
- 4d316ca fix: update pointpainting model (tzhong518, Jul 1, 2022)
- 28165ff feat: latest update (Oct 14, 2022)
1 change: 1 addition & 0 deletions perception/image_projection_based_fusion/.gitignore
@@ -0,0 +1 @@
/data/
170 changes: 158 additions & 12 deletions perception/image_projection_based_fusion/CMakeLists.txt
@@ -1,19 +1,13 @@
cmake_minimum_required(VERSION 3.14)
project(image_projection_based_fusion)
add_compile_options(-Wno-unknown-pragmas)

find_package(autoware_cmake REQUIRED)
autoware_package()

find_package(OpenCV REQUIRED)
find_package(Eigen3 REQUIRED)
autoware_package()

include_directories(
include
SYSTEM
${OpenCV_INCLUDE_DIRS}
${EIGEN3_INCLUDE_DIRS}
)

# Build non-CUDA dependent nodes
ament_auto_add_library(${PROJECT_NAME} SHARED
src/fusion_node.cpp
src/debugger.cpp
@@ -22,21 +16,173 @@ ament_auto_add_library(${PROJECT_NAME} SHARED
src/roi_cluster_fusion/node.cpp
src/roi_detected_object_fusion/node.cpp
)

target_link_libraries(${PROJECT_NAME}
${OpenCV_LIBRARIES}
${EIGEN3_LIBRARIES}
)

rclcpp_components_register_node(${PROJECT_NAME}
PLUGIN "image_projection_based_fusion::RoiDetectedObjectFusionNode"
EXECUTABLE roi_detected_object_fusion_node
)

rclcpp_components_register_node(${PROJECT_NAME}
PLUGIN "image_projection_based_fusion::RoiClusterFusionNode"
EXECUTABLE roi_cluster_fusion_node
)

rclcpp_components_register_node(${PROJECT_NAME}
PLUGIN "image_projection_based_fusion::RoiDetectedObjectFusionNode"
EXECUTABLE roi_detected_object_fusion_node
set(CUDA_VERBOSE OFF)

# set flags for CUDA availability
option(CUDA_AVAIL "CUDA available" OFF)
find_package(CUDA)
if(CUDA_FOUND)
find_library(CUBLAS_LIBRARIES cublas HINTS
${CUDA_TOOLKIT_ROOT_DIR}/lib64
${CUDA_TOOLKIT_ROOT_DIR}/lib
)
if(CUDA_VERBOSE)
message("CUDA is available!")
message("CUDA Libs: ${CUDA_LIBRARIES}")
message("CUDA Headers: ${CUDA_INCLUDE_DIRS}")
endif()
# Note: cublas_device was deprecated in CUDA version 9.2
# https://forums.developer.nvidia.com/t/where-can-i-find-libcublas-device-so-or-libcublas-device-a/67251/4
# In LibTorch, CUDA_cublas_device_LIBRARY is used.
unset(CUDA_cublas_device_LIBRARY CACHE)
set(CUDA_AVAIL ON)
else()
message("CUDA NOT FOUND")
set(CUDA_AVAIL OFF)
endif()

# set flags for TensorRT availability
option(TRT_AVAIL "TensorRT available" OFF)
# try to find the tensorRT modules
find_library(NVINFER nvinfer)
find_library(NVONNXPARSER nvonnxparser)
if(NVINFER AND NVONNXPARSER)
if(CUDA_VERBOSE)
message("TensorRT is available!")
message("NVINFER: ${NVINFER}")
message("NVONNXPARSER: ${NVONNXPARSER}")
endif()
set(TRT_AVAIL ON)
else()
message("TensorRT is NOT Available")
set(TRT_AVAIL OFF)
endif()

# set flags for CUDNN availability
option(CUDNN_AVAIL "CUDNN available" OFF)
# try to find the CUDNN module
find_library(CUDNN_LIBRARY
NAMES libcudnn.so${__cudnn_ver_suffix} libcudnn${__cudnn_ver_suffix}.dylib ${__cudnn_lib_win_name}
PATHS $ENV{LD_LIBRARY_PATH} ${__libpath_cudart} ${CUDNN_ROOT_DIR} ${PC_CUDNN_LIBRARY_DIRS} ${CMAKE_INSTALL_PREFIX}
PATH_SUFFIXES lib lib64 bin
DOC "CUDNN library."
)
if(CUDNN_LIBRARY)
if(CUDA_VERBOSE)
message(STATUS "CUDNN is available!")
message(STATUS "CUDNN_LIBRARY: ${CUDNN_LIBRARY}")
endif()
set(CUDNN_AVAIL ON)
else()
message("CUDNN is NOT Available")
set(CUDNN_AVAIL OFF)
endif()

# Create folder to store trained models.
# NOTE: This must be created regardless of CUDA_AVAIL to be specified in ament_auto_package()
set(DATA_PATH ${CMAKE_CURRENT_SOURCE_DIR}/data)
execute_process(COMMAND mkdir -p ${DATA_PATH})

if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL)
# Download trained models

message(STATUS "start to download")
function(download FILE_NAME FILE_HASH)
message(STATUS "Checking and downloading ${FILE_NAME}")
set(FILE_PATH ${DATA_PATH}/${FILE_NAME})
set(STATUS_CODE 0)
message(STATUS "start ${FILE_NAME}")
if(EXISTS ${FILE_PATH})
message(STATUS "found ${FILE_NAME}")
file(MD5 ${FILE_PATH} EXISTING_FILE_HASH)
if("${FILE_HASH}" STREQUAL "${EXISTING_FILE_HASH}")
message(STATUS "same ${FILE_NAME}")
message(STATUS "File already exists.")
else()
message(STATUS "diff ${FILE_NAME}")
message(STATUS "File hash changes. Downloading now ...")
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v2/${FILE_NAME}
${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300)
list(GET DOWNLOAD_STATUS 0 STATUS_CODE)
list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE)
endif()
else()
message(STATUS "not found ${FILE_NAME}")
message(STATUS "File doesn't exists. Downloading now ...")
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v2/${FILE_NAME}
${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300)
list(GET DOWNLOAD_STATUS 0 STATUS_CODE)
list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE)
endif()
if(${STATUS_CODE} EQUAL 0)
message(STATUS "Download completed successfully!")
else()
message(FATAL_ERROR "Error occurred during download: ${ERROR_MESSAGE}")
endif()
endfunction()

# default model
download(pts_voxel_encoder_pointpainting.onnx 438dfecd962631ec8f011e0dfa2c6160)
download(pts_backbone_neck_head_pointpainting.onnx e590a0b2bdcd35e01340cf4521bf149e)
find_package(OpenCV REQUIRED)
find_package(Eigen3 REQUIRED)

include_directories(
include
${OpenCV_INCLUDE_DIRS}
${EIGEN3_INCLUDE_DIRS}
${PCL_INCLUDE_DIRS}
)

ament_auto_add_library(pointpainting_lib SHARED
src/pointpainting_fusion/node.cpp
src/pointpainting_fusion/pointpainting_trt.cpp
src/pointpainting_fusion/voxel_generator.cpp
)

cuda_add_library(pointpainting_cuda_lib SHARED
src/pointpainting_fusion/preprocess_kernel.cu
)

target_link_libraries(pointpainting_lib
${OpenCV_LIBRARIES}
${EIGEN3_LIBRARIES}
${PCL_LIBRARIES}
${NVINFER}
${CUDA_LIBRARIES}
${CUBLAS_LIBRARIES}
${CUDA_curand_LIBRARY}
${CUDNN_LIBRARY}
pointpainting_cuda_lib
)

rclcpp_components_register_node(pointpainting_lib
PLUGIN "image_projection_based_fusion::PointpaintingFusionNode"
EXECUTABLE pointpainting_fusion_node
)

else()
message("Skipping build of some nodes due to missing dependencies")
endif()

ament_auto_package(INSTALL_TO_SHARE
launch
config
data
)
@@ -0,0 +1,10 @@
/**:
ros__parameters:
class_names: ["CAR", "PEDESTRIAN", "BICYCLE"]
rename_car_to_truck_and_bus: true
point_feature_size: 7 # x, y, z, timelag and car, pedestrian, bicycle
max_voxel_size: 40000
point_cloud_range: [-76.8, -76.8, -3.0, 76.8, 76.8, 5.0]
voxel_size: [0.32, 0.32, 8.0]
downsample_factor: 1
encoder_in_feature_size: 12
(binary file: diff not displayed)
@@ -0,0 +1,89 @@
# pointpainting_fusion

## Purpose

The `pointpainting_fusion` package utilizes the class information detected by a 2D object detector to improve 3D object detection.

## Inner-workings / Algorithms

The LiDAR points are projected onto the output of an image-only 2D object detection network, and the class scores of the detections are appended to each point. The painted point cloud can then be fed to the CenterPoint network.

![pointpainting_fusion_image](./images/pointpainting_fusion.jpg)
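
The following is a minimal, self-contained sketch of the painting step described above, not the package's actual implementation: a LiDAR point is projected into the image with a pinhole projection matrix, and if it lands inside a 2D detection's ROI, that ROI's class scores are written into the point's extra feature channels. The type and function names (`Roi`, `PaintedPoint`, `paintPoint`) and all numeric values are illustrative assumptions.

```cpp
// Minimal sketch of the painting step (illustrative only; assumes a pinhole
// camera model and a single ROI carrying per-class scores).
#include <algorithm>
#include <array>
#include <cstddef>
#include <iostream>

#include <Eigen/Dense>

struct Roi
{
  double x_min, y_min, x_max, y_max;    // 2D bounding box in pixels
  std::array<double, 3> class_scores;   // car, pedestrian, bicycle
};

struct PaintedPoint
{
  Eigen::Vector3d xyz = Eigen::Vector3d::Zero();      // point in the camera frame
  std::array<double, 3> class_scores{0.0, 0.0, 0.0};  // painted score channels
};

void paintPoint(
  const Eigen::Matrix<double, 3, 4> & projection, const Roi & roi, PaintedPoint & point)
{
  // Project the 3D point onto the image plane.
  const Eigen::Vector4d p_hom(point.xyz.x(), point.xyz.y(), point.xyz.z(), 1.0);
  const Eigen::Vector3d uvw = projection * p_hom;
  if (uvw.z() <= 0.0) {
    return;  // behind the camera
  }
  const double u = uvw.x() / uvw.z();
  const double v = uvw.y() / uvw.z();

  // If the projected point falls inside the 2D detection, copy its class scores.
  if (u >= roi.x_min && u <= roi.x_max && v >= roi.y_min && v <= roi.y_max) {
    for (std::size_t i = 0; i < point.class_scores.size(); ++i) {
      point.class_scores[i] = std::max(point.class_scores[i], roi.class_scores[i]);
    }
  }
}

int main()
{
  Eigen::Matrix<double, 3, 4> projection;
  projection << 1000.0, 0.0, 640.0, 0.0,   // fx,  0, cx, 0
                0.0, 1000.0, 360.0, 0.0,   //  0, fy, cy, 0
                0.0, 0.0, 1.0, 0.0;

  const Roi car_roi{600.0, 300.0, 700.0, 420.0, {0.9, 0.05, 0.05}};

  PaintedPoint point;
  point.xyz = Eigen::Vector3d(0.2, 0.1, 10.0);

  paintPoint(projection, car_roi, point);
  std::cout << "car score: " << point.class_scores[0] << "\n";  // prints 0.9
  return 0;
}
```

In the real node the projection matrix comes from `sensor_msgs::msg::CameraInfo` and the ROIs from `tier4_perception_msgs::msg::DetectedObjectsWithFeature`, as listed in the Inputs section below.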

## Inputs / Outputs

### Input

| Name | Type | Description |
| --------------------- | -------------------------------------------------------- | ---------------------------------------------------------------------------------- |
| `input` | `sensor_msgs::msg::PointCloud2` | input pointcloud |
| `input/camera_infoID` | `sensor_msgs::msg::CameraInfo` | camera information to project 3D points onto image planes, `ID` is between 0 and 7 |
| `input/roisID` | `tier4_perception_msgs::msg::DetectedObjectsWithFeature` | ROIs from each image, `ID` is between 0 and 7 |
| `input/image_rawID` | `sensor_msgs::msg::Image` | images for visualization, `ID` is between 0 and 7 |

### Output

| Name | Type | Description |
| -------------------- | ----------------------------------------------------- | ------------------------------------------------- |
| `output` | `sensor_msgs::msg::PointCloud2` | painted pointcloud |
| `~/output/objects` | `autoware_auto_perception_msgs::msg::DetectedObjects` | detected objects |
| `output/image_rawID` | `sensor_msgs::msg::Image` | images for visualization, `ID` is between 0 and 7 |

## Parameters

### Core Parameters

| Name | Type | Default Value | Description |
| ------------------------------- | ------ | ------------- | ----------------------------------------------------------- |
| `score_threshold` | float | `0.4` | detected objects with a score below this threshold are ignored |
| `densification_world_frame_id` | string | `map` | world frame ID used to fuse multi-frame pointclouds |
| `densification_num_past_frames` | int | `0` | the number of past frames to fuse with the current frame |
| `trt_precision` | string | `fp16` | TensorRT inference precision: `fp32` or `fp16` |
| `encoder_onnx_path` | string | `""` | path to VoxelFeatureEncoder ONNX file |
| `encoder_engine_path` | string | `""` | path to VoxelFeatureEncoder TensorRT Engine file |
| `head_onnx_path` | string | `""` | path to DetectionHead ONNX file |
| `head_engine_path` | string | `""` | path to DetectionHead TensorRT Engine file |
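
The `densification_*` parameters relate to multi-sweep pointcloud densification, the mechanism this node shares with `lidar_centerpoint`. The sketch below is a rough, self-contained illustration of that idea under simplifying assumptions, not the shared implementation the node actually uses: past sweeps are kept in a small buffer, transformed into the current frame, and concatenated with a per-point time lag, which corresponds to the `timelag` channel in `point_feature_size: 7`. `Sweep`, `SweepBuffer`, and `densify` are hypothetical names, not the actual API.

```cpp
// Minimal sketch of multi-sweep densification with a time-lag channel
// (illustrative only; assumes past sweeps are already expressed in a
// common world frame such as `map`).
#include <array>
#include <cstddef>
#include <deque>
#include <iostream>
#include <vector>

#include <Eigen/Dense>

struct Sweep
{
  double stamp_sec;                     // acquisition time of the sweep
  std::vector<Eigen::Vector3d> points;  // points in the world frame
};

class SweepBuffer
{
public:
  explicit SweepBuffer(std::size_t num_past_frames) : num_past_frames_(num_past_frames) {}

  void push(const Sweep & sweep)
  {
    sweeps_.push_front(sweep);
    while (sweeps_.size() > num_past_frames_ + 1) {
      sweeps_.pop_back();  // keep only the current sweep plus N past sweeps
    }
  }

  // Transform every buffered point into the current sensor frame and append
  // its time lag relative to the newest sweep: (x, y, z, time_lag).
  std::vector<std::array<double, 4>> densify(const Eigen::Affine3d & world_to_current) const
  {
    std::vector<std::array<double, 4>> out;
    if (sweeps_.empty()) {
      return out;
    }
    const double current_stamp = sweeps_.front().stamp_sec;
    for (const auto & sweep : sweeps_) {
      const double time_lag = current_stamp - sweep.stamp_sec;
      for (const auto & p : sweep.points) {
        const Eigen::Vector3d q = world_to_current * p;
        out.push_back({q.x(), q.y(), q.z(), time_lag});
      }
    }
    return out;
  }

private:
  std::size_t num_past_frames_;
  std::deque<Sweep> sweeps_;
};

int main()
{
  SweepBuffer buffer(2);  // e.g. densification_num_past_frames: 2

  Sweep past;
  past.stamp_sec = 0.0;
  past.points = {Eigen::Vector3d(1.0, 0.0, 0.0)};

  Sweep current;
  current.stamp_sec = 0.1;
  current.points = {Eigen::Vector3d(2.0, 0.0, 0.0)};

  buffer.push(past);
  buffer.push(current);

  const auto points = buffer.densify(Eigen::Affine3d::Identity());
  std::cout << points.size() << " points, oldest time lag " << points.back()[3] << " s\n";
  return 0;
}
```

With `densification_num_past_frames: 0` (the default), only the current sweep is used and every time lag is zero.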

## Assumptions / Known limits

- Multi-frame painting is not implemented yet.

## (Optional) Error detection and handling

<!-- Write how to detect errors and how to recover from them.

Example:
This package can handle up to 20 obstacles. If more obstacles found, this node will give up and raise diagnostic errors.
-->

## (Optional) Performance characterization

<!-- Write performance information like complexity. If it wouldn't be the bottleneck, not necessary.

Example:
### Complexity

This algorithm is O(N).

### Processing time

...
-->

## References/External links

[1] Vora, Sourabh, et al. "Pointpainting: Sequential fusion for 3d object detection." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2020.

[2] Ding, Zhuangzhuang, et al. "1st Place Solution for Waymo Open Dataset Challenge--3D Detection and Domain Adaptation." arXiv preprint arXiv:2006.15505 (2020). [CVPR'20 Workshop on Scalability in Autonomous Driving] Waymo Open Dataset Challenge: <https://youtu.be/9g9GsI33ol8?t=535>

## (Optional) Future extensions / Unimplemented parts

<!-- Write future extensions of this package.

Example:
Currently, this package can't handle the chattering obstacles well. We plan to add some probabilistic filters in the perception layer to improve it.
Also, there are some parameters that should be global(e.g. vehicle size, max steering, etc.). These will be refactored and defined as global parameters so that we can share the same parameters between different nodes.
-->
@@ -1,4 +1,4 @@
// Copyright 2021 TIER IV, Inc.
// Copyright 2022 TIER IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -72,7 +72,7 @@ class FusionNode : public rclcpp::Node
const sensor_msgs::msg::CameraInfo & camera_info, Msg & output_msg) = 0;

// set args if you need
virtual void postprocess();
virtual void postprocess(Msg & output_msg);

void publish(const Msg & output_msg);

@@ -0,0 +1,65 @@
// Copyright 2022 TIER IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef IMAGE_PROJECTION_BASED_FUSION__POINTPAINTING_FUSION__NODE_HPP_
#define IMAGE_PROJECTION_BASED_FUSION__POINTPAINTING_FUSION__NODE_HPP_

#include "image_projection_based_fusion/fusion_node.hpp"
#include "image_projection_based_fusion/pointpainting_fusion/pointpainting_trt.hpp"

#include <image_projection_based_fusion/utils/geometry.hpp>
#include <image_projection_based_fusion/utils/utils.hpp>
#include <lidar_centerpoint/centerpoint_trt.hpp>

#include <map>
#include <memory>
#include <string>
#include <vector>

namespace image_projection_based_fusion
{
using Label = autoware_auto_perception_msgs::msg::ObjectClassification;

class PointpaintingFusionNode : public FusionNode<sensor_msgs::msg::PointCloud2, DetectedObjects>
{
public:
explicit PointpaintingFusionNode(const rclcpp::NodeOptions & options);

protected:
void preprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;

void fuseOnSingleImage(
const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const std::size_t image_id,
const DetectedObjectsWithFeature & input_roi_msg,
const sensor_msgs::msg::CameraInfo & camera_info,
sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;

void postprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;

rclcpp::Publisher<DetectedObjects>::SharedPtr obj_pub_ptr_;

float score_threshold_{0.0};
std::vector<std::string> class_names_;
std::vector<double> pointcloud_range;
bool rename_car_to_truck_and_bus_{false};
bool has_twist_{false};

std::unique_ptr<image_projection_based_fusion::PointPaintingTRT> detector_ptr_{nullptr};

bool out_of_scope(const DetectedObjects & obj);
};

} // namespace image_projection_based_fusion

#endif // IMAGE_PROJECTION_BASED_FUSION__POINTPAINTING_FUSION__NODE_HPP_