Merge pull request #633 from tier4/sync-upstream
chore: sync upstream
tier4-autoware-public-bot[bot] committed Jun 30, 2023
2 parents 0ee9f89 + f5b4053 commit 345065a
Showing 70 changed files with 2,456 additions and 1,212 deletions.
@@ -2,44 +2,84 @@
<launch>
<arg name="enable_image_decompressor" default="true" description="enable image decompressor"/>
<arg name="enable_fine_detection" default="true" description="enable fine position adjustment of traffic light"/>
<arg name="input/image" default="/sensing/camera/traffic_light/image_raw" description="image raw topic name for traffic light"/>
<arg name="input/camera_info" default="/sensing/camera/traffic_light/camera_info" description="camera info topic name for traffic light"/>
<arg name="fine_detector_label_path" default="$(find-pkg-share traffic_light_ssd_fine_detector)/data/voc_labels_tl.txt" description="fine detector label path"/>
<arg name="fine_detector_model_path" default="$(find-pkg-share traffic_light_ssd_fine_detector)/data/mb2-ssd-lite-tlr.onnx" description="fine detector onnx model path"/>
<arg name="fine_detector_label_path" default="$(find-pkg-share traffic_light_fine_detector)/data/tlr_labels.txt" description="fine detector label path"/>
<arg name="fine_detector_model_path" default="$(find-pkg-share traffic_light_fine_detector)/data/tlr_yolox_s.onnx" description="fine detector onnx model path"/>
<arg name="classifier_label_path" default="$(find-pkg-share traffic_light_classifier)/data/lamp_labels.txt" description="classifier label path"/>
<arg name="classifier_model_path" default="$(find-pkg-share traffic_light_classifier)/data/traffic_light_classifier_mobilenetv2.onnx" description="classifier onnx model path"/>
<arg name="classifier_model_path" default="$(find-pkg-share traffic_light_classifier)/data/traffic_light_classifier_efficientNet_b1.onnx" description="classifier onnx model path"/>
<arg name="output/traffic_signals" default="/perception/traffic_light_recognition/traffic_signals"/>
<arg name="use_crosswalk_traffic_light_estimator" default="true" description="output pedestrian's traffic light signals"/>
<let name="namespace1" value="camera6"/>
<let name="namespace2" value="camera7"/>
<let name="all_camera_namespaces" value="[$(var namespace1), $(var namespace2)]"/>
<!-- camera 6 TLR pipeline -->
<group>
+<push-ros-namespace namespace="$(var namespace1)"/>
+<let name="input/image" value="/sensing/camera/$(var namespace1)/image_raw"/>
+<let name="input/camera_info" value="/sensing/camera/$(var namespace1)/camera_info"/>
+<let name="map_based_detector_output_topic" value="rough/rois" if="$(var enable_fine_detection)"/>
+<let name="map_based_detector_output_topic" value="rois" unless="$(var enable_fine_detection)"/>

<group if="$(var enable_fine_detection)">
<node pkg="topic_tools" exec="relay" name="traffic_light_camra_info_relay" args="$(var input/camera_info) camera_info"/>
<include file="$(find-pkg-share traffic_light_map_based_detector)/launch/traffic_light_map_based_detector.launch.xml">
<arg name="input/camera_info" value="$(var input/camera_info)"/>
<arg name="output/rois" value="rough/rois"/>
<arg name="min_timestamp_offset" value="-0.3"/>
<arg name="expect/rois" value="expect/rois"/>
<arg name="output/rois" value="$(var map_based_detector_output_topic)"/>
<arg name="output/camera_info" value="camera_info"/>
</include>

<include file="$(find-pkg-share tier4_perception_launch)/launch/traffic_light_recognition/traffic_light_node_container.launch.py">
<arg name="input/image" value="$(var input/image)"/>
<arg name="enable_image_decompressor" value="$(var enable_image_decompressor)"/>
<arg name="enable_fine_detection" value="$(var enable_fine_detection)"/>
<arg name="use_crosswalk_traffic_light_estimator" value="$(var use_crosswalk_traffic_light_estimator)"/>
<arg name="use_intra_process" value="true"/>
<arg name="use_multithread" value="true"/>
<arg name="fine_detector_label_path" value="$(var fine_detector_label_path)"/>
<arg name="fine_detector_model_path" value="$(var fine_detector_model_path)"/>
<arg name="classifier_label_path" value="$(var classifier_label_path)"/>
<arg name="classifier_model_path" value="$(var classifier_model_path)"/>
</include>
</group>
+<!-- camera 7 TLR pipeline -->
<group>
+<push-ros-namespace namespace="$(var namespace2)"/>
+<let name="input/image" value="/sensing/camera/$(var namespace2)/image_raw"/>
+<let name="input/camera_info" value="/sensing/camera/$(var namespace2)/camera_info"/>
+<let name="map_based_detector_output_topic" value="rough/rois" if="$(var enable_fine_detection)"/>
+<let name="map_based_detector_output_topic" value="rois" unless="$(var enable_fine_detection)"/>

<group unless="$(var enable_fine_detection)">
<node pkg="topic_tools" exec="relay" name="traffic_light_camra_info_relay" args="$(var input/camera_info) camera_info"/>
<include file="$(find-pkg-share traffic_light_map_based_detector)/launch/traffic_light_map_based_detector.launch.xml">
-<arg name="output/rois" value="rois"/>
<arg name="input/camera_info" value="$(var input/camera_info)"/>
+<arg name="min_timestamp_offset" value="-0.04"/>
+<arg name="expect/rois" value="expect/rois"/>
+<arg name="output/rois" value="$(var map_based_detector_output_topic)"/>
+<arg name="output/camera_info" value="camera_info"/>
</include>
</group>

<!-- classification -->
<group>
<include file="$(find-pkg-share tier4_perception_launch)/launch/traffic_light_recognition/traffic_light_node_container.launch.py">
<arg name="input/image" value="$(var input/image)"/>
<arg name="enable_image_decompressor" value="$(var enable_image_decompressor)"/>
<arg name="enable_fine_detection" value="$(var enable_fine_detection)"/>
<arg name="use_crosswalk_traffic_light_estimator" value="$(var use_crosswalk_traffic_light_estimator)"/>
<arg name="use_intra_process" value="true"/>
<arg name="use_multithread" value="true"/>
<arg name="label_file" value="$(var fine_detector_label_path)"/>
<arg name="onnx_file" value="$(var fine_detector_model_path)"/>
<arg name="label_file_path" value="$(var classifier_label_path)"/>
<arg name="model_file_path" value="$(var classifier_model_path)"/>
<arg name="fine_detector_label_path" value="$(var fine_detector_label_path)"/>
<arg name="fine_detector_model_path" value="$(var fine_detector_model_path)"/>
<arg name="classifier_label_path" value="$(var classifier_label_path)"/>
<arg name="classifier_model_path" value="$(var classifier_model_path)"/>
</include>
</group>

+<!-- traffic_light_multi_camera_fusion -->
+<group>
+<node pkg="traffic_light_multi_camera_fusion" exec="traffic_light_multi_camera_fusion_node" name="traffic_light_multi_camera_fusion" output="screen">
+<param name="camera_namespaces" value="$(var all_camera_namespaces)"/>
+<param name="perform_group_fusion" value="true"/>
+<remap from="~/input/vector_map" to="/map/vector_map"/>
+<remap from="~/output/traffic_signals" to="$(var output/traffic_signals)"/>
+</node>
+</group>
<!-- visualizer -->
<group>
<include file="$(find-pkg-share traffic_light_visualization)/launch/traffic_light_map_visualizer.launch.xml"/>
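The XML above duplicates the whole per-camera pipeline for `camera6` and `camera7`. For reference, the same group structure could be generated in a loop from a Python launch file — a minimal sketch, not part of this commit, assuming the pipelines differ only in namespace and topic names (the detector and classifier includes are omitted):

```python
# Sketch only: generate one TLR group per camera namespace, mirroring the
# duplicated <group> blocks in the XML above.
from launch import LaunchDescription
from launch.actions import GroupAction
from launch_ros.actions import Node, PushRosNamespace

CAMERA_NAMESPACES = ["camera6", "camera7"]


def generate_launch_description():
    groups = []
    for ns in CAMERA_NAMESPACES:
        groups.append(
            GroupAction(
                [
                    PushRosNamespace(ns),
                    # Relay the global camera_info topic into the namespace,
                    # like the topic_tools relay node in the XML above.
                    Node(
                        package="topic_tools",
                        executable="relay",
                        name="traffic_light_camera_info_relay",
                        arguments=[f"/sensing/camera/{ns}/camera_info", "camera_info"],
                    ),
                    # The map_based_detector include and the
                    # traffic_light_node_container include would follow here.
                ]
            )
        )
    return LaunchDescription(groups)
```

The next hunks appear to come from the `traffic_light_node_container.launch.py` referenced above.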
@@ -35,38 +35,39 @@ def add_launch_arg(name: str, default_value=None, description=None):
DeclareLaunchArgument(name, default_value=default_value, description=description)
)

-ssd_fine_detector_share_dir = get_package_share_directory("traffic_light_ssd_fine_detector")
+fine_detector_share_dir = get_package_share_directory("traffic_light_fine_detector")
classifier_share_dir = get_package_share_directory("traffic_light_classifier")
add_launch_arg("enable_image_decompressor", "True")
add_launch_arg("enable_fine_detection", "True")
add_launch_arg("input/image", "/sensing/camera/traffic_light/image_raw")

-# traffic_light_ssd_fine_detector
+# traffic_light_fine_detector
add_launch_arg(
-"onnx_file", os.path.join(ssd_fine_detector_share_dir, "data", "mb2-ssd-lite-tlr.onnx")
+"fine_detector_model_path",
+os.path.join(fine_detector_share_dir, "data", "tlr_yolox_s.onnx"),
)
add_launch_arg(
-"label_file", os.path.join(ssd_fine_detector_share_dir, "data", "voc_labels_tl.txt")
+"fine_detector_label_path",
+os.path.join(fine_detector_share_dir, "data", "tlr_labels.txt"),
)
add_launch_arg("dnn_header_type", "pytorch")
add_launch_arg("fine_detector_precision", "FP32")
add_launch_arg("score_thresh", "0.7")
add_launch_arg("max_batch_size", "8")
add_launch_arg("fine_detector_precision", "fp16")
add_launch_arg("fine_detector_score_thresh", "0.3")
add_launch_arg("fine_detector_nms_thresh", "0.65")

add_launch_arg("approximate_sync", "False")
add_launch_arg("mean", "[0.5, 0.5, 0.5]")
add_launch_arg("std", "[0.5, 0.5, 0.5]")

# traffic_light_classifier
add_launch_arg("classifier_type", "1")
add_launch_arg(
-"model_file_path",
-os.path.join(classifier_share_dir, "data", "traffic_light_classifier_mobilenetv2.onnx"),
+"classifier_model_path",
+os.path.join(classifier_share_dir, "data", "traffic_light_classifier_efficientNet_b1.onnx"),
)
+add_launch_arg(
+"classifier_label_path", os.path.join(classifier_share_dir, "data", "lamp_labels.txt")
+)
-add_launch_arg("label_file_path", os.path.join(classifier_share_dir, "data", "lamp_labels.txt"))
add_launch_arg("precision", "fp16")
add_launch_arg("input_c", "3")
add_launch_arg("input_h", "224")
add_launch_arg("input_w", "224")
add_launch_arg("classifier_precision", "fp16")
add_launch_arg("classifier_mean", "[123.675, 116.28, 103.53]")
add_launch_arg("classifier_std", "[58.395, 57.12, 57.375]")

add_launch_arg("use_crosswalk_traffic_light_estimator", "True")
add_launch_arg("use_intra_process", "False")
@@ -80,7 +81,7 @@ def create_parameter_dict(*args):

container = ComposableNodeContainer(
name="traffic_light_node_container",
namespace="/perception/traffic_light_recognition",
namespace="",
package="rclcpp_components",
executable=LaunchConfiguration("container_executable"),
composable_node_descriptions=[
@@ -92,12 +93,11 @@ def create_parameter_dict(*args):
create_parameter_dict(
"approximate_sync",
"classifier_type",
"model_file_path",
"label_file_path",
"precision",
"input_c",
"input_h",
"input_w",
"classifier_model_path",
"classifier_label_path",
"classifier_precision",
"classifier_mean",
"classifier_std",
)
],
remappings=[
@@ -195,28 +195,25 @@ def create_parameter_dict(*args):
condition=IfCondition(LaunchConfiguration("enable_image_decompressor")),
)

-ssd_fine_detector_param = create_parameter_dict(
-"onnx_file",
-"label_file",
-"dnn_header_type",
-"score_thresh",
-"max_batch_size",
-"approximate_sync",
-"mean",
-"std",
+fine_detector_param = create_parameter_dict(
+"fine_detector_model_path",
+"fine_detector_label_path",
+"fine_detector_precision",
+"fine_detector_score_thresh",
+"fine_detector_nms_thresh",
)
-ssd_fine_detector_param["mode"] = LaunchConfiguration("fine_detector_precision")

fine_detector_loader = LoadComposableNodes(
composable_node_descriptions=[
ComposableNode(
package="traffic_light_ssd_fine_detector",
plugin="traffic_light::TrafficLightSSDFineDetectorNodelet",
name="traffic_light_ssd_fine_detector",
parameters=[ssd_fine_detector_param],
package="traffic_light_fine_detector",
plugin="traffic_light::TrafficLightFineDetectorNodelet",
name="traffic_light_fine_detector",
parameters=[fine_detector_param],
remappings=[
("~/input/image", LaunchConfiguration("input/image")),
("~/input/rois", "rough/rois"),
("~/expect/rois", "expect/rois"),
("~/output/rois", "rois"),
],
extra_arguments=[
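The renamed arguments above flow into the composable nodes through `create_parameter_dict`, whose body is not shown in this diff. A helper with this signature typically maps each argument name to its `LaunchConfiguration` — an assumed sketch of that behavior, shown with the new fine-detector arguments:

```python
from launch.substitutions import LaunchConfiguration


def create_parameter_dict(*args):
    # Assumed behavior: each launch-argument name becomes a parameter whose
    # value is resolved from the corresponding launch configuration, so the
    # renames (e.g. "classifier_model_path") propagate without further code.
    return {arg: LaunchConfiguration(arg) for arg in args}


fine_detector_param = create_parameter_dict(
    "fine_detector_model_path",
    "fine_detector_label_path",
    "fine_detector_precision",
    "fine_detector_score_thresh",
    "fine_detector_nms_thresh",
)
```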
@@ -108,7 +108,6 @@ def launch_setup(context, *args, **kwargs):
parameters=[
obstacle_velocity_limiter_param,
vehicle_info_param,
{"obstacles.dynamic_source": "static_only"},
],
extra_arguments=[{"use_intra_process_comms": LaunchConfiguration("use_intra_process")}],
)
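Deleting that single dict changes behavior because ROS 2 applies the entries of `parameters=[...]` in order, with later entries overriding earlier ones. A hedged sketch of the effect (the plugin name and the stand-in dicts are assumptions, not taken from this diff):

```python
from launch_ros.descriptions import ComposableNode

# Stand-ins for the dicts loaded earlier in the launch file.
obstacle_velocity_limiter_param = {}
vehicle_info_param = {}

# Later entries in parameters=[...] override earlier ones, so the removed
# {"obstacles.dynamic_source": "static_only"} dict had been masking the value
# configured in the node's parameter YAML; after this change the YAML value
# takes effect again.
component = ComposableNode(
    package="obstacle_velocity_limiter",
    plugin="obstacle_velocity_limiter::ObstacleVelocityLimiterNode",  # assumed plugin name
    parameters=[
        obstacle_velocity_limiter_param,
        vehicle_info_param,
        # override dict removed here
    ],
)
```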
59 changes: 56 additions & 3 deletions localization/yabloc/README.md
@@ -4,6 +4,10 @@

[![thumbnail](docs/yabloc_thumbnail.jpg)](https://youtu.be/Eaf6r_BNFfk)

YabLoc estimates the vehicle position by matching road surface markings extracted from camera images against a vector map.
Point cloud maps and LiDAR are not required.
This allows YabLoc to localize vehicles that are not equipped with LiDAR, and to work in environments where no point cloud map is available.

## Packages

- [yabloc_common](yabloc_common/README.md)
Expand All @@ -13,8 +17,8 @@

## How to launch YabLoc instead of NDT

-When launching autoware, if you set `localization_mode:=yabloc` as an argument, YabLoc will be launched instead of NDT.
-By default, `localization_mode` is `ndt`.
+When launching autoware, if you set `localization_mode:=camera` as an argument, YabLoc will be launched instead of NDT.
+By default, `localization_mode` is `lidar`.

A sample command to run YabLoc is as follows:

@@ -23,9 +27,58 @@

```shell
ros2 launch autoware_launch logging_simulator.launch.xml \
  map_path:=$HOME/autoware_map/sample-map-rosbag \
  vehicle_model:=sample_vehicle \
  sensor_model:=sample_sensor_kit \
-  localization_mode:=yabloc
+  localization_mode:=camera
```

## Architecture

![node_diagram](docs/yabloc_architecture.drawio.svg)

## Principle

The diagram below illustrates the basic principle of YabLoc.
It extracts road surface markings by detecting line segments in the image and keeping only those that fall within the road area obtained from graph-based segmentation.
The red lines at the center-top of the diagram represent the line segments identified as road surface markings.
YabLoc transforms these segments for each particle and determines the particle's weight by comparing them with the cost map generated from Lanelet2.

![principle](docs/yabloc_principle.png)
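To make the weighting step concrete, here is a minimal sketch of the idea with hypothetical function and variable names (the real implementation lives in the YabLoc packages): each particle transforms the extracted line segments into the map frame with its own pose and accumulates the Lanelet2 cost-map response along them.

```python
import numpy as np


def score_particle(pose_2d, segments, cost_map, resolution):
    """Hypothetical sketch of YabLoc-style particle weighting.

    pose_2d:    (x, y, yaw) pose hypothesis of one particle
    segments:   iterable of ((x0, y0), (x1, y1)) line-segment endpoints
                detected in the vehicle frame
    cost_map:   2D array rasterized from Lanelet2 road markings
    resolution: meters per cost-map cell
    """
    x, y, yaw = pose_2d
    c, s = np.cos(yaw), np.sin(yaw)
    rot = np.array([[c, -s], [s, c]])
    score = 0.0
    for p0, p1 in segments:
        # Transform both endpoints into the map frame with the particle pose.
        q0 = rot @ np.asarray(p0) + (x, y)
        q1 = rot @ np.asarray(p1) + (x, y)
        # Sample the cost map along the segment and accumulate its response.
        for t in np.linspace(0.0, 1.0, 10):
            u, v = ((1.0 - t) * q0 + t * q1) / resolution
            iu, iv = int(u), int(v)
            if 0 <= iv < cost_map.shape[0] and 0 <= iu < cost_map.shape[1]:
                score += cost_map[iv, iu]
    return score
```

Particles whose transformed segments land on strong road-marking responses receive larger weights after normalization. Where few segments survive the road-area filter, this score carries little information, which is why the Limitation section below notes the reliance on GNSS, IMU, and wheel odometry.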

## Visualization

### Core visualization topics

These topics are not visualized by default.

<img src="docs/yabloc_rviz_description.png" width=800>

| index | topic name | description |
| ----- | -------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 1     | `/localization/yabloc/pf/predicted_particle_marker`           | Particle distribution of the particle filter; red particles are probable candidates.                                                                   |
| 2     | `/localization/yabloc/pf/scored_cloud`                        | 3D-projected line segments; the color indicates how well they match the map.                                                                           |
| 3     | `/localization/yabloc/image_processing/lanelet2_overlay_image` | Overlay of Lanelet2 (yellow lines) onto the image based on the estimated pose. If they match the actual road markings well, localization is performing well. |

### Image topics for debug

These topics are not visualized by default.

<img src="docs/yabloc_image_description.png" width=800>

| index | topic name | description |
| ----- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------------- |
| 1     | `/localization/yabloc/pf/cost_map_image`                                 | Cost map generated from Lanelet2                                                |
| 2     | `/localization/yabloc/pf/match_image`                                    | Projected line segments                                                         |
| 3     | `/localization/yabloc/image_processing/image_with_colored_line_segment`  | Classified line segments; green segments are used in particle correction        |
| 4     | `/localization/yabloc/image_processing/lanelet2_overlay_image`           | Overlay of Lanelet2                                                             |
| 5     | `/localization/yabloc/image_processing/segmented_image`                  | Graph-based segmentation result                                                 |
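Outside RViz, any of these image topics can be inspected with a few lines of `rclpy` — a minimal sketch using the cost-map topic from the table above (the node name is arbitrary):

```python
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image


class DebugImageLogger(Node):
    def __init__(self):
        super().__init__("yabloc_debug_image_logger")
        # Topic name taken from the table above; any of the listed image
        # topics can be substituted here.
        self.create_subscription(
            Image, "/localization/yabloc/pf/cost_map_image", self.on_image, 1
        )

    def on_image(self, msg: Image) -> None:
        # Log the resolution and encoding of each received frame.
        self.get_logger().info(f"{msg.width}x{msg.height} {msg.encoding}")


def main():
    rclpy.init()
    rclpy.spin(DebugImageLogger())


if __name__ == "__main__":
    main()
```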

## Limitation

- Running YabLoc and NDT simultaneously is not supported.
  - This is because running both at the same time may be computationally too expensive.
  - Also, NDT is superior to YabLoc in most cases, so there is little benefit to running them at the same time.
- It does not estimate roll and pitch, so some of the perception nodes may not work well.
- It does not support multiple cameras yet, but multi-camera support is planned.
- In places with few road surface markings, such as intersections, the estimation relies heavily on GNSS, IMU, and the vehicle's wheel odometry.
- If the road boundary or road surface markings are not included in the Lanelet2 map, the estimation is likely to fail.
- The sample rosbag provided in the Autoware tutorial does not include images, so YabLoc cannot be run with it.
  - If you want to test YabLoc, the sample test data provided in this [PR](https://github.com/autowarefoundation/autoware.universe/pull/3946) is useful.
Binary file added localization/yabloc/docs/yabloc_principle.png