feat(yabloc_pose_initializer): make yabloc independent of dnn model by default (autowarefoundation#4296)

* care no dnn model exists

Signed-off-by: Kento Yabuuchi <kento.yabuuchi.2@tier4.jp>

* modify segmentation_srv to inform inference failure

Signed-off-by: Kento Yabuuchi <kento.yabuuchi.2@tier4.jp>

* add documentation

Signed-off-by: Kento Yabuuchi <kento.yabuuchi.2@tier4.jp>

* fix typo

Signed-off-by: Kento Yabuuchi <kento.yabuuchi.2@tier4.jp>

* ignore DDOWNLOAD

Signed-off-by: Kento Yabuuchi <kento.yabuuchi.2@tier4.jp>

* make const variable be capital

Signed-off-by: Kento Yabuuchi <kento.yabuuchi.2@tier4.jp>

* use std::optional rather than reference arg

Signed-off-by: Kento Yabuuchi <kento.yabuuchi.2@tier4.jp>

---------

Signed-off-by: Kento Yabuuchi <kento.yabuuchi.2@tier4.jp>
KYabuuchi authored and kminoda committed Jul 24, 2023
1 parent 756b073 commit 00161c1
Showing 9 changed files with 78 additions and 149 deletions.
1 change: 0 additions & 1 deletion localization/yabloc/yabloc_pose_initializer/CMakeLists.txt
@@ -50,7 +50,6 @@ target_link_libraries(${TARGET} ${PCL_LIBRARIES} Sophus::Sophus)
# Semantic segmentation
install(PROGRAMS
src/semantic_segmentation/semantic_segmentation_core.py
src/semantic_segmentation/semantic_segmentation_node.py
src/semantic_segmentation/semantic_segmentation_server.py
DESTINATION lib/${PROJECT_NAME}
)
14 changes: 14 additions & 0 deletions localization/yabloc/yabloc_pose_initializer/README.md
@@ -5,6 +5,20 @@ This package contains some nodes related to initial pose estimation.
- [camera_pose_initializer](#camera_pose_initializer)
- [semantic_segmentation_server](#semantic_segmentation_server)

Ideally, this package downloads a pre-trained semantic segmentation model during the build and loads it at runtime for initialization.
However, to handle cases where network connectivity is not available at build time, **the default behavior is not to download the model during build.**
Even if the model is not downloaded, initialization will still complete, but the accuracy may be compromised.

<!-- cspell: ignore DDOWNLOAD_ARTIFACTS -->

To download the model, please add `--cmake-args -DDOWNLOAD_ARTIFACTS=ON` to the build command.

```bash
colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release -DDOWNLOAD_ARTIFACTS=ON --packages-select yabloc_pose_initializer
```

For detailed information about the downloaded contents, please consult the `download.cmake` file in this package.
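
Roughly speaking, the fallback works as in the sketch below (illustrative only; the actual logic lives in `semantic_segmentation_server.py` of this package): the segmentation server loads the DNN only when the model file exists, and otherwise reports failure so that pose initialization proceeds without segmentation.

```python
import os


def load_segmentation_model(model_path: str):
    """Return the DNN wrapper if the model file exists, otherwise None."""
    if not os.path.exists(model_path):
        # Model not downloaded (e.g. built without -DDOWNLOAD_ARTIFACTS=ON):
        # segmentation is reported as unavailable instead of aborting.
        return None
    # SemanticSegmentationCore is the wrapper used by the server in this package;
    # the import path here is assumed for illustration.
    import semantic_segmentation_core as core

    return core.SemanticSegmentationCore(model_path)
```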

## Note

This package makes use of external code. The trained files are provided by apollo. The trained files are automatically downloaded when you build.
@@ -70,7 +70,8 @@ class CameraPoseInitializer : public rclcpp::Node
PoseCovStamped create_rectified_initial_pose(
const Eigen::Vector3f & pos, double yaw_angle_rad, const PoseCovStamped & src_msg);

bool estimate_pose(const Eigen::Vector3f & position, double & yaw_angle_rad, double yaw_std_rad);
std::optional<double> estimate_pose(
const Eigen::Vector3f & position, const double yaw_angle_rad, const double yaw_std_rad);
};
} // namespace yabloc


This file was deleted.

@@ -20,10 +20,6 @@
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl_conversions/pcl_conversions.h>

namespace yabloc
{
CameraPoseInitializer::CameraPoseInitializer()
@@ -74,7 +70,7 @@ cv::Mat bitwise_and_3ch(const cv::Mat src1, const cv::Mat src2)
return merged;
}

int count_nonzero(cv::Mat image_3ch)
int count_non_zero(cv::Mat image_3ch)
{
std::vector<cv::Mat> images;
cv::split(image_3ch, images);
@@ -85,20 +81,20 @@ int count_nonzero(cv::Mat image_3ch)
return count;
}

bool CameraPoseInitializer::estimate_pose(
const Eigen::Vector3f & position, double & yaw_angle_rad, double yaw_std_rad)
std::optional<double> CameraPoseInitializer::estimate_pose(
const Eigen::Vector3f & position, const double yaw_angle_rad, const double yaw_std_rad)
{
if (!projector_module_->define_project_func()) {
return false;
return std::nullopt;
}
if (!lane_image_) {
RCLCPP_WARN_STREAM(get_logger(), "vector map is not ready ");
return false;
return std::nullopt;
}
// TODO(KYabuuchi) check latest_image_msg's time stamp, too
if (!latest_image_msg_.has_value()) {
RCLCPP_WARN_STREAM(get_logger(), "source image is not ready");
return false;
return std::nullopt;
}

Image segmented_image;
@@ -110,10 +106,22 @@ bool CameraPoseInitializer::estimate_pose(
using namespace std::literals::chrono_literals;
std::future_status status = result_future.wait_for(1000ms);
if (status == std::future_status::ready) {
segmented_image = result_future.get()->dst_image;
const auto response = result_future.get();
if (response->success) {
segmented_image = response->dst_image;
} else {
RCLCPP_WARN_STREAM(get_logger(), "segmentation service failed unexpectedly");
// NOTE: Even if the segmentation service fails, the function will still return the
// yaw_angle_rad as it is and complete the initialization. The service fails
// when the DNN model is not downloaded. Ideally, initialization should rely on
// segmentation, but this implementation allows initialization even in cases where network
// connectivity is not available.
return yaw_angle_rad;
}
} else {
RCLCPP_ERROR_STREAM(get_logger(), "segmentation service exited unexpectedly");
return false;
RCLCPP_ERROR_STREAM(
get_logger(), "segmentation service did not return within the expected time");
return std::nullopt;
}
}

@@ -149,30 +157,18 @@ bool CameraPoseInitializer::estimate_pose(
if (lane_angle_rad) {
gain = 2 + std::cos((lane_angle_rad.value() - angle_rad) / 2.0);
}
const float score = gain * count_nonzero(dst);

// DEBUG:
constexpr bool imshow = false;
if (imshow) {
cv::Mat show_image;
cv::hconcat(std::vector<cv::Mat>{rotated_image, vector_map_image, dst}, show_image);
cv::imshow("and operator", show_image);
cv::waitKey(50);
}
// If count_non_zero() returns 0 everywhere, the orientation is chosen based solely on the gain
const float score = gain * (1 + count_non_zero(dst));

scores.push_back(score);
angles_rad.push_back(angle_rad);
}

{
size_t max_index =
std::distance(scores.begin(), std::max_element(scores.begin(), scores.end()));
yaw_angle_rad = angles_rad.at(max_index);
}

marker_module_->publish_marker(scores, angles_rad, position);

return true;
const size_t max_index =
std::distance(scores.begin(), std::max_element(scores.begin(), scores.end()));
return angles_rad.at(max_index);
}

void CameraPoseInitializer::on_map(const HADMapBin & msg)
Expand All @@ -193,8 +189,6 @@ void CameraPoseInitializer::on_service(
{
RCLCPP_INFO_STREAM(get_logger(), "CameraPoseInitializer on_service");

response->success = false;

const auto query_pos_with_cov = request->pose_with_covariance;
const auto query_pos = request->pose_with_covariance.pose.pose.position;
const auto orientation = request->pose_with_covariance.pose.pose.orientation;
@@ -203,12 +197,14 @@
RCLCPP_INFO_STREAM(get_logger(), "Given initial position " << pos_vec3f.transpose());

// Estimate orientation
const auto header = request->pose_with_covariance.header;
double yaw_angle_rad = 2 * std::atan2(orientation.z, orientation.w);
if (estimate_pose(pos_vec3f, yaw_angle_rad, yaw_std_rad)) {
const double initial_yaw_angle_rad = 2 * std::atan2(orientation.z, orientation.w);
const auto yaw_angle_rad_opt = estimate_pose(pos_vec3f, initial_yaw_angle_rad, yaw_std_rad);
if (yaw_angle_rad_opt.has_value()) {
response->success = true;
response->pose_with_covariance =
create_rectified_initial_pose(pos_vec3f, yaw_angle_rad, query_pos_with_cov);
create_rectified_initial_pose(pos_vec3f, yaw_angle_rad_opt.value(), query_pos_with_cov);
} else {
response->success = false;
}
}

@@ -32,7 +32,7 @@ void MarkerModule::publish_marker(
auto minmax = std::minmax_element(scores.begin(), scores.end());
auto normalize = [minmax](int score) -> float {
return static_cast<float>(score - *minmax.first) /
static_cast<float>(*minmax.second - *minmax.first);
std::max(1e-4f, static_cast<float>(*minmax.second - *minmax.first));
};

MarkerArray array;

This file was deleted.

@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

from cv_bridge import CvBridge
@@ -23,6 +24,12 @@
from sensor_msgs.msg import Image
from yabloc_pose_initializer.srv import SemanticSegmentation

# cspell: ignore DDOWNLOAD
ERROR_MESSAGE = """\
The yabloc_pose_initializer is not working correctly because the DNN model has not been downloaded correctly.
To download models, "-DDOWNLOAD_ARTIFACTS=ON" is required at build time.
Please see the README of yabloc_pose_initializer for more information."""


class SemanticSegmentationServer(Node):
def __init__(self):
@@ -31,26 +38,43 @@ def __init__(self):
model_path = self.declare_parameter("model_path", "").value

self.get_logger().info("model path: " + model_path)
self.dnn_ = core.SemanticSegmentationCore(model_path)
self.bridge_ = CvBridge()

if os.path.exists(model_path):
self.dnn_ = core.SemanticSegmentationCore(model_path)
else:
self.dnn_ = None
self.__print_error_message()

self.srv = self.create_service(
SemanticSegmentation, "semantic_segmentation_srv", self.on_service
)

def __print_error_message(self):
messages = ERROR_MESSAGE.split("\n")
for message in messages:
self.get_logger().error(message)

def on_service(self, request, response):
response.dst_image = self.__inference(request.src_image)
if self.dnn_:
response.dst_image = self.__inference(request.src_image)
response.success = True
else:
self.__print_error_message()
response.success = False
response.dst_image = request.src_image

return response

def __inference(self, msg: Image):
stamp = msg.header.stamp
self.get_logger().info("Subscribed image: " + str(stamp))

src_image = self.bridge_.imgmsg_to_cv2(msg)
mask = self.dnn_.inference(src_image)

mask = self.dnn_.inference(src_image)
dst_msg = self.bridge_.cv2_to_imgmsg(mask)
dst_msg.encoding = "bgr8"

return dst_msg


@@ -1,3 +1,4 @@
sensor_msgs/Image src_image
---
bool success
sensor_msgs/Image dst_image
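
For reference, a minimal `rclpy` client sketch showing how a caller can use the new `success` flag (illustrative only; the node name is made up, and the service name assumes the server's default `semantic_segmentation_srv` without remapping):

```python
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image

from yabloc_pose_initializer.srv import SemanticSegmentation


def request_segmentation(node: Node, src_image: Image):
    client = node.create_client(SemanticSegmentation, "semantic_segmentation_srv")
    if not client.wait_for_service(timeout_sec=1.0):
        node.get_logger().error("segmentation service is not available")
        return None

    request = SemanticSegmentation.Request()
    request.src_image = src_image
    future = client.call_async(request)
    rclpy.spin_until_future_complete(node, future, timeout_sec=1.0)

    response = future.result()
    if response is None or not response.success:
        # The server sets success=False when the DNN model was not downloaded;
        # callers should then initialize without relying on segmentation.
        return None
    return response.dst_image


if __name__ == "__main__":
    rclpy.init()
    node = Node("segmentation_client_example")  # illustrative node name
    dst = request_segmentation(node, Image())
    node.get_logger().info("segmentation ok" if dst else "segmentation unavailable")
    node.destroy_node()
    rclpy.shutdown()
```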
