diff --git a/common/perception_utils/include/perception_utils/prime_synchronizer.hpp b/common/perception_utils/include/perception_utils/prime_synchronizer.hpp index 319c48f6ccc5e..e339849149989 100644 --- a/common/perception_utils/include/perception_utils/prime_synchronizer.hpp +++ b/common/perception_utils/include/perception_utils/prime_synchronizer.hpp @@ -158,7 +158,7 @@ class PrimeSynchronizer * @tparam Idx * @param argv * @return true All messages are not nullptr - * @return false At least one message in the tuplc is nullptr + * @return false At least one message in the tuple is nullptr */ template bool isArgvValid( diff --git a/common/tensorrt_common/src/tensorrt_common.cpp b/common/tensorrt_common/src/tensorrt_common.cpp index cbe0df15b9e93..f1e4835d2ba2d 100644 --- a/common/tensorrt_common/src/tensorrt_common.cpp +++ b/common/tensorrt_common/src/tensorrt_common.cpp @@ -416,6 +416,7 @@ bool TrtCommon::buildEngineFromOnnx( first = false; } if (last) { + // cspell: ignore preds if ( contain(name, "reg_preds") || contain(name, "cls_preds") || contain(name, "obj_preds")) { diff --git a/common/tvm_utility/tvm_utility-extras.cmake b/common/tvm_utility/tvm_utility-extras.cmake index a882abee9a0dc..4214aa4995f0f 100644 --- a/common/tvm_utility/tvm_utility-extras.cmake +++ b/common/tvm_utility/tvm_utility-extras.cmake @@ -35,6 +35,7 @@ function(get_neural_network MODEL_NAME MODEL_BACKEND DEPENDENCY) set(PREPROCESSING "") # Prioritize user-provided models. + # cspell: ignore COPYONLY if(IS_DIRECTORY "${DATA_PATH}/user/${MODEL_NAME}") message(STATUS "Using user-provided model from ${DATA_PATH}/user/${MODEL_NAME}") file(REMOVE_RECURSE "${DATA_PATH}/models/${MODEL_NAME}/") diff --git a/perception/ground_segmentation/test/test_ray_ground_filter.cpp b/perception/ground_segmentation/test/test_ray_ground_filter.cpp index d373ed3b623f4..af8bb29ab12b7 100644 --- a/perception/ground_segmentation/test/test_ray_ground_filter.cpp +++ b/perception/ground_segmentation/test/test_ray_ground_filter.cpp @@ -109,13 +109,13 @@ TEST_F(RayGroundFilterComponentTestSuite, TestCase1) // check out_cloud int effect_num = 0; int total_num = 0; - const float min_noground_point_z = 0.1; // z in base_frame + const float min_no_ground_point_z = 0.1; // z in base_frame for (sensor_msgs::PointCloud2ConstIterator iter_x(out_cloud, "x"), iter_y(out_cloud, "y"), iter_z(out_cloud, "z"); iter_x != iter_x.end(); ++iter_x, ++iter_y, ++iter_z) { const float z = *iter_z; total_num += 1; - if (z > min_noground_point_z) { + if (z > min_no_ground_point_z) { effect_num += 1; } } diff --git a/perception/ground_segmentation/test/test_scan_ground_filter.cpp b/perception/ground_segmentation/test/test_scan_ground_filter.cpp index e7386ba3e5978..acc81f8817cd8 100644 --- a/perception/ground_segmentation/test/test_scan_ground_filter.cpp +++ b/perception/ground_segmentation/test/test_scan_ground_filter.cpp @@ -45,7 +45,7 @@ class ScanGroundFilterTest : public ::testing::Test output_pointcloud_pub_ = rclcpp::create_publisher( dummy_node_, "/test_scan_ground_filter/output_cloud", 1); - // no real uages,ScanGroundFilterComponent cosntruct need these params + // no real usages, ScanGroundFilterComponent constructor needs these params rclcpp::NodeOptions options; std::vector parameters; parameters.emplace_back(rclcpp::Parameter("wheel_radius", 0.39)); @@ -173,13 +173,13 @@ TEST_F(ScanGroundFilterTest, TestCase1) // check out_cloud int effect_num = 0; int total_num = 0; - const float min_noground_point_z = 0.1; + const float min_no_ground_point_z = 0.1;
for (sensor_msgs::PointCloud2ConstIterator iter_x(out_cloud, "x"), iter_y(out_cloud, "y"), iter_z(out_cloud, "z"); iter_x != iter_x.end(); ++iter_x, ++iter_y, ++iter_z) { const float z = *iter_z; total_num += 1; - if (z > min_noground_point_z) { + if (z > min_no_ground_point_z) { effect_num += 1; } } diff --git a/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp b/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp index c372e3b32805c..cbfe2d0c66669 100644 --- a/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp +++ b/perception/lidar_centerpoint_tvm/lib/postprocess/generate_detected_boxes.cpp @@ -146,28 +146,28 @@ void generateDetectedBoxes3D( // construct boxes3d failed std::cerr << "lidar_centerpoint_tvm: construct boxes3d failed" << std::endl; } - std::vector det_boxes3d_nonms(num_det_boxes3d); + std::vector det_boxes3d_no_nms(num_det_boxes3d); std::copy_if( - boxes3d.begin(), boxes3d.end(), det_boxes3d_nonms.begin(), + boxes3d.begin(), boxes3d.end(), det_boxes3d_no_nms.begin(), is_score_greater(config.score_threshold_)); // sort by score - std::sort(det_boxes3d_nonms.begin(), det_boxes3d_nonms.end(), score_greater()); + std::sort(det_boxes3d_no_nms.begin(), det_boxes3d_no_nms.end(), score_greater()); // suppress by NMS std::vector final_keep_mask(num_det_boxes3d); const auto num_final_det_boxes3d = - circleNMS(det_boxes3d_nonms, config.circle_nms_dist_threshold_, final_keep_mask); + circleNMS(det_boxes3d_no_nms, config.circle_nms_dist_threshold_, final_keep_mask); det_boxes3d.resize(num_final_det_boxes3d); std::size_t box_id = 0; for (std::size_t idx = 0; idx < final_keep_mask.size(); idx++) { if (final_keep_mask[idx]) { - det_boxes3d[box_id] = det_boxes3d_nonms[idx]; + det_boxes3d[box_id] = det_boxes3d_no_nms[idx]; box_id++; } } - // std::copy_if(det_boxes3d_nonms.begin(), det_boxes3d_nonms.end(), final_keep_mask.begin(), + // std::copy_if(det_boxes3d_no_nms.begin(), det_boxes3d_no_nms.end(), final_keep_mask.begin(), // det_boxes3d.begin(), is_kept()); } diff --git a/perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp b/perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp index c2e97e51a72c5..11ae8e7064b1c 100644 --- a/perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp +++ b/perception/lidar_centerpoint_tvm/lib/preprocess/voxel_generator.cpp @@ -75,7 +75,7 @@ std::size_t VoxelGenerator::pointsToVoxels( auto pc_msg = pc_cache_iter->pointcloud_msg; auto affine_past2current = pd_ptr_->getAffineWorldToCurrent() * pc_cache_iter->affine_past2world; - float timelag = static_cast( + float time_lag = static_cast( pd_ptr_->getCurrentTimestamp() - rclcpp::Time(pc_msg.header.stamp).seconds()); for (sensor_msgs::PointCloud2ConstIterator x_iter(pc_msg, "x"), y_iter(pc_msg, "y"), @@ -87,7 +87,7 @@ std::size_t VoxelGenerator::pointsToVoxels( point[0] = point_current.x(); point[1] = point_current.y(); point[2] = point_current.z(); - point[3] = timelag; + point[3] = time_lag; out_of_range = false; for (std::size_t di = 0; di < config_.point_dim_size_; di++) { diff --git a/perception/map_based_prediction/src/map_based_prediction_node.cpp b/perception/map_based_prediction/src/map_based_prediction_node.cpp index d3a22f6221855..a7762815f9e69 100644 --- a/perception/map_based_prediction/src/map_based_prediction_node.cpp +++ b/perception/map_based_prediction/src/map_based_prediction_node.cpp @@ -550,7 +550,7 @@ ObjectClassification::_label_type changeLabelForPrediction( label == 
ObjectClassification::MOTORCYCLE || label == ObjectClassification::BICYCLE) { // if object is within road lanelet and satisfies yaw constraints const bool within_road_lanelet = withinRoadLanelet(object, lanelet_map_ptr_, true); - const float high_speed_threshold = 25.0 / 18.0 * 5.0; // High speed bycicle 25 km/h + const float high_speed_threshold = 25.0 / 18.0 * 5.0; // High speed bicycle 25 km/h const bool high_speed_object = object.kinematics.twist_with_covariance.twist.linear.x > high_speed_threshold; diff --git a/perception/multi_object_tracker/image/anchor_point.drawio.svg b/perception/multi_object_tracker/image/anchor_point.drawio.svg index 72a7cb516dd1b..8e90b9b312a90 100644 --- a/perception/multi_object_tracker/image/anchor_point.drawio.svg +++ b/perception/multi_object_tracker/image/anchor_point.drawio.svg @@ -6,7 +6,7 @@ width="576px" height="331px" viewBox="-0.5 -0.5 576 331" - content="<mxfile><diagram id="28KWLbO92IXZGYeKzApA" name="Page-1">7Vrdb6M4EP9rkHYfGgEGkjyWftxJdydV6kn3uDLgBFSCOeM0yf31N8aGgO1uP5Zs2uvloYWxPWbmNzOese2gq83+F4br/A+akdLx3WzvoGvH9+fhHP4KwkESgvlCEtasyCTJOxLui3+IIrqKui0y0ow6ckpLXtRjYkqriqR8RMOM0d2424qW41lrvCYG4T7FpUn9q8h4LqkLf36k/0qKdd7N7EVL2bLBXWfFoslxRneS1AqHbhx0xSjl8mmzvyKl0F2nF6mB2yda+w9jpOIvGeDLAY+43CrZ1HfxQyfsI2G8ANl/xwkp72hT8IJW0JRQzunGQXHX4bIs1qKB0xqoOd+U8OLBIwhZC2ab/VqYwyzBTZHOWItMvCrK8oqWlIHs1xWtiBjAGX3oVduykDYA0qC4qDIpXQgvGW5ykqlePa/2y1GC00WGen6DFh8FQQijYiU+SED2T6rQ64EBgyZ0Qzg7QBc1YKGMUtkyUhp3d0fLCBQpHxpFpIhYGeO653zECx4UZHb40EeHz0DGhd/t7VmRmQKYwAAGp3yLy1akvEgBHUBJaEXHC8TlY/1jBUwKYhNmQWxTZJkYHjMCLHHSshKqrmlR8VaKMHbCa8Fry6kCwzPUr+AbepEiTQCG54/RCCITjb7PEA40ARyL5/2ks/IdPrRqE9p5IDzNlS6FVu5V94aWgnNn1lGnasJuHonUuGdVv0W1OmaMcsyHGBJWgLwCeGUBd0dK3FS4/pPeyQ+2QKo8KoqmAXGpeVRgYmiD0J8AwqUB4W3BGi6WfobTh6JafxpXCsMRCn2cG6CwPJEjdRnYMLBVaQ4CggCtFb4jEIbGf6KgFmoOgcKfB4X3fFAjVXYp8l2h7RI3sG5rS/szwYJkRiKsqQamo1uWjhJ0U10DdYQWdXQ0RkrMi8fxlDYdqRlk3OvR8LUlBi00NctPvevt9PYJRp6nMQo1RhyzNeEGoxayXuyXoWhJ4SL3ixNe5Zg783jvzIEGbNyedDBJ8MBzwrHZslOk7r2E96/fKqdbfd+Hp540XEZ6uDR9tM/5pl61PDMRNPT+vjP08xdYKHpbHj9JheWFHx3Ak5ZY58Umeh6b/3RaPxmK8/Pl9d4rarNVSfYqn4mtqU0DCzP/bsazohW3adF0qE72aLaQ06l9SH+2QH43l43WM/FngRqqk16aXX03k/Jcm2O50+RSkDvNIjS2imX0tnTKxivUeE2YUZmV4ueKClMV+0ZUWP68qOD/X2cOoQjQ+erMl2wyn77OPFdNGfjL2XL4C7SVcjmbL98aFl/NerooadmhrsiuFIolDS82mEPS70el8KkEvC5at08J3UMP5VHvyAVPWkAG7riCRJYKst8yGNpgMIX7mQUIwzsg0KQhLMFtrfFZkEDuOA72Ae45JCYJhHMzEJ5+qwYeAN/YE42faNMGaTurvq0kWZwq+7DVJDIWivpBRMB+KYv+3orD+lgtaqvVkCSjpsVK4CNjIew3arUY0XYQbXoAntiYJHtQhxSrm+OT2FigR5O5LcE9VTSxlSkvszHxSRYbA6yvSQnG4LvlR0f3qTJ9isii59KRuZqfajsYmWXNjwSWUQrgYm6LFxHeiM3GKmnEP72I+sAmMkk6oR0NWEzBussyRWKHXnd+p6QebccPABH0O8wBjKql+O5xX77bffI/dCWmX+cJ3nq6pzPqQ/z0VRayHf8oHx0h3Xl2QllG2IWKApftUPbl4mJI/yr9WosFsTgyaKcmeODXSe/ULbffCKmd4ZUJV48ILqfiy7Z1DZ7bOO0VxVrcVkpzXK3N+0qMbqustUjh3bu84OS+xu2B8I5h/ZzDZn2uO403a7B6lgtGge0s4g3ODK/HO5rSLo4XXdHNvw==</diagram></mxfile>" + content="<mxfile><diagram id="28KWLbO92IXZGYeKzApA" 
name="Page-1">7Vrdb6M4EP9rkHYfGgEGkjyWfuxJdydV6kn3uDLgBKsO5ozzdX/92WAI2O626ZFNcr08tDC2B8/8ZsYzth1wt9p9Y7DMf6cZIo7vZjsH3Du+D3wg/krCXhFCRVgynDUk70B4xn8jRXQVdY0zVA06ckoJx+WQmNKiQCkf0CBjdDvstqBk+NUSLpFBeE4hMal/4oznDXXmTw/0XxBe5u2XvWjetKxg21mxqHKY0W1DqoUDDw64Y5Ty5mm1u0NE6q7VS6OBx1dau4kxVPD3DPCbARtI1ko2NS++b4XdIMaxkP03mCDyRCvMMS1EU0I5pysHxG2HW4KXsoHTUlBzviLixROPQshSMlvtltIcJgmscDphNTLxAhNyRwllQvb7ghZIDuCMvnSqrVk0NiCkATEuska6ULxksMpRpnp1vOqZgwSmswx0/HotPgiCUIyKTX0pFUqh0K5HUvr7hugKcbYXXVTrTBlla8tK4+72YBmBIuV9o4gUESpjXHacD3iJBwWZHT5w7fAZyLji9/h4VmTGACYwgIEpX0NSzz/HqUBHoCS1ouMlZOND/UMFTCo0gZgFsRXOMjk8ZkiwhEnNSqq6pLjgtRRh7IT3kteaUwWGZ6hfwdf3IkUaAQzPH6IRRCYaXZ8+HGAEOGZv+0lr5Vu4r9UmtfOCeJorXUqtPKvuFSWSc2vWUatqxB42qNG4Z1W/RbU6ZoxyyPsYIoaFvBJ4ZQFPB0pcFbD8gz41E7ZAqjwqisYBca55VGBiaIPQHwHCuQHhI2YVl0s/g+kLLpafxpXCcIBCF+d6KMxP5EhtBtYPbEWaCwGFTLUVXhAIfeM/UVALNYcA4c+DwjOgMJSPiuxW5rtS2wRWYt3Wlvbjg0VF1yzV8nGUDfJlU109dYQWdbQ0hgjkeDPMsm06Ul9o4l6Hhq8tMWCmqbmZ+1Nnp4+vMPI8jVGoMeKQLRE3GNWQdWK/D0VLChe5X5zwLofcmcY7Zypogo3bkfYmSTzwHHFotmwVqX0n4v3r98JpV9/L8NSThstID5emj3Y539irlmcmgobeLztDP3+BBaKP5fGjVFheeO0AnrTEOi820dvY/KfT+tFQnJ4vr/eOqM0WBO1UPhNbU5tKLMz8hxnPghbcpkXToVrZo8ms+Zzah/QnM+C337LROib+JFBDddKrmB2TSXmuzbHccXIpkTtNIjC0inn0sXTKxivUeI2YUZmV4ueKCmMV+0ZUmP+8qOD/X2f2oQjA+erM92wyn6DOvJCaMvDnk3n/F2gr5XwynX80LB7NerwoadmhLtCWSMWiiuMV5CLp9yMifSoRXhct66eE7kQP5VEX5IInLSADd1hBAksF2W0Z9G0wGMP9zAKEwa0g0KRCbAPrWuOzIAHcYRzsAtxbSIwSCKdmIDz9Vo14EPjGnmz8RJs2QNtZ9W0lyexU2YetJmlioawfZATslrLor7U8rI/VorZY9ElN1LRYiZhkLIX9Tq0WI9v2sk0PwCMbU8NeqKMRq/3GJ7GxQI8mU1uCe6poYitT3mdjckoWGxNY3yMijMF3ybWj+1qZPkZk0XPpyFzNT7UdDMyy5t8ElkEK4EJuixcRXMnNxiKp5D+9iLpiExklndCOBiymYN1lGSOxA8ed3ympB9vxPUAk/QlyAUZRU3z3sC/f7j75V12J6dd5go+e7umMuhA/fpUFbMc/ykcHSLeenVCWIXajosBtPZR9ubnp0782fq3FglgeGdSfRrDn10nn1DW3XxEqnf6VCVePCC6ncmbrshSeWzn1FcVS3lZKc1gszftKjK6LrLZI6d3bHHP0XML6hHjLoH7OYbM+1x3HmzVYPcsFo8B2FvEBZxavhzuajV0cLrqCh38A</diagram></mxfile>" > @@ -79,44 +79,44 @@ > - + @@ -126,76 +126,76 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -266,11 +266,11 @@
-
raw obserbation
+
raw observation
- raw obserbation + raw observation @@ -293,52 +293,52 @@ > - + @@ -348,83 +348,83 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -461,56 +461,56 @@ > - + @@ -520,105 +520,105 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -656,11 +656,11 @@ > @@ -668,10 +668,10 @@ - + - + @@ -694,7 +694,7 @@
- obserbation at + observation at
anchor point
@@ -702,7 +702,7 @@
- obserbation at... + observation at... diff --git a/perception/occupancy_grid_map_outlier_filter/include/occupancy_grid_map_outlier_filter/occupancy_grid_map_outlier_filter_nodelet.hpp b/perception/occupancy_grid_map_outlier_filter/include/occupancy_grid_map_outlier_filter/occupancy_grid_map_outlier_filter_nodelet.hpp index af8d91dee8c1d..953307e9d5380 100644 --- a/perception/occupancy_grid_map_outlier_filter/include/occupancy_grid_map_outlier_filter/occupancy_grid_map_outlier_filter_nodelet.hpp +++ b/perception/occupancy_grid_map_outlier_filter/include/occupancy_grid_map_outlier_filter/occupancy_grid_map_outlier_filter_nodelet.hpp @@ -47,10 +47,10 @@ using sensor_msgs::msg::PointCloud2; using std_msgs::msg::Header; using PclPointCloud = pcl::PointCloud; -class RadiusSearch2dfilter +class RadiusSearch2dFilter { public: - explicit RadiusSearch2dfilter(rclcpp::Node & node); + explicit RadiusSearch2dFilter(rclcpp::Node & node); void filter( const PclPointCloud & input, const Pose & pose, PclPointCloud & output, PclPointCloud & outlier); @@ -114,7 +114,7 @@ class OccupancyGridMapOutlierFilterComponent : public rclcpp::Node std::shared_ptr tf2_listener_; // 2d outlier filter - std::shared_ptr radius_search_2d_filter_ptr_; + std::shared_ptr radius_search_2d_filter_ptr_; // Debugger std::shared_ptr debugger_ptr_; diff --git a/perception/occupancy_grid_map_outlier_filter/src/occupancy_grid_map_outlier_filter_nodelet.cpp b/perception/occupancy_grid_map_outlier_filter/src/occupancy_grid_map_outlier_filter_nodelet.cpp index 0cbaead35c720..72136c20fddb8 100644 --- a/perception/occupancy_grid_map_outlier_filter/src/occupancy_grid_map_outlier_filter_nodelet.cpp +++ b/perception/occupancy_grid_map_outlier_filter/src/occupancy_grid_map_outlier_filter_nodelet.cpp @@ -101,7 +101,7 @@ boost::optional getCost( namespace occupancy_grid_map_outlier_filter { -RadiusSearch2dfilter::RadiusSearch2dfilter(rclcpp::Node & node) +RadiusSearch2dFilter::RadiusSearch2dFilter(rclcpp::Node & node) { search_radius_ = node.declare_parameter("radius_search_2d_filter.search_radius", 1.0f); min_points_and_distance_ratio_ = @@ -113,7 +113,7 @@ RadiusSearch2dfilter::RadiusSearch2dfilter(rclcpp::Node & node) kd_tree_ = pcl::make_shared>(false); } -void RadiusSearch2dfilter::filter( +void RadiusSearch2dFilter::filter( const PclPointCloud & input, const Pose & pose, PclPointCloud & output, PclPointCloud & outlier) { const auto & xyz_cloud = input; @@ -125,7 +125,7 @@ void RadiusSearch2dfilter::filter( } std::vector k_indices(xy_cloud->points.size()); - std::vector k_dists(xy_cloud->points.size()); + std::vector k_distances(xy_cloud->points.size()); kd_tree_->setInputCloud(xy_cloud); for (size_t i = 0; i < xy_cloud->points.size(); ++i) { const float distance = @@ -134,7 +134,7 @@ void RadiusSearch2dfilter::filter( std::max(static_cast(min_points_and_distance_ratio_ / distance + 0.5f), min_points_), max_points_); const int points_num = - kd_tree_->radiusSearch(i, search_radius_, k_indices, k_dists, min_points_threshold); + kd_tree_->radiusSearch(i, search_radius_, k_indices, k_distances, min_points_threshold); if (min_points_threshold <= points_num) { output.points.push_back(xyz_cloud.points.at(i)); @@ -144,13 +144,13 @@ void RadiusSearch2dfilter::filter( } } -void RadiusSearch2dfilter::filter( +void RadiusSearch2dFilter::filter( const PclPointCloud & high_conf_input, const PclPointCloud & low_conf_input, const Pose & pose, PclPointCloud & output, PclPointCloud & outlier) { const auto & high_conf_xyz_cloud = high_conf_input; const 
auto & low_conf_xyz_cloud = low_conf_input; - // check the limit points nunber + // check the limit on the number of points if (low_conf_xyz_cloud.points.size() > max_filter_points_nb_) { RCLCPP_WARN( rclcpp::get_logger("OccupancyGridMapOutlierFilterComponent"), @@ -170,7 +170,7 @@ void RadiusSearch2dfilter::filter( } std::vector k_indices(xy_cloud->points.size()); - std::vector k_dists(xy_cloud->points.size()); + std::vector k_distances(xy_cloud->points.size()); kd_tree_->setInputCloud(xy_cloud); for (size_t i = 0; i < low_conf_xyz_cloud.points.size(); ++i) { const float distance = @@ -179,7 +179,7 @@ void RadiusSearch2dfilter::filter( std::max(static_cast(min_points_and_distance_ratio_ / distance + 0.5f), min_points_), max_points_); const int points_num = - kd_tree_->radiusSearch(i, search_radius_, k_indices, k_dists, min_points_threshold); + kd_tree_->radiusSearch(i, search_radius_, k_indices, k_distances, min_points_threshold); if (min_points_threshold <= points_num) { output.points.push_back(low_conf_xyz_cloud.points.at(i)); @@ -226,7 +226,7 @@ OccupancyGridMapOutlierFilterComponent::OccupancyGridMapOutlierFilterComponent( /* Radius search 2d filter */ if (use_radius_search_2d_filter) { - radius_search_2d_filter_ptr_ = std::make_shared<RadiusSearch2dfilter>(*this); + radius_search_2d_filter_ptr_ = std::make_shared<RadiusSearch2dFilter>(*this); } /* debugger */ if (enable_debugger) { diff --git a/perception/probabilistic_occupancy_grid_map/pointcloud-based-occupancy-grid-map.md b/perception/probabilistic_occupancy_grid_map/pointcloud-based-occupancy-grid-map.md index 626d8fde6ebdf..7d2efb3468f3a 100644 --- a/perception/probabilistic_occupancy_grid_map/pointcloud-based-occupancy-grid-map.md +++ b/perception/probabilistic_occupancy_grid_map/pointcloud-based-occupancy-grid-map.md @@ -6,8 +6,8 @@ ### 1st step -First of all, input obstacle/raw pointcloud are transformed into the polar coordinate centered around `scan_origin` and divided int ocircular bins per angle_increment respectively. -At this time, each point belonging to each bin is stored as range data. In addition, the x,y information in the map coordinate is also stored for raytracing on the map coordinate. +First of all, input obstacle/raw pointcloud are transformed into the polar coordinate centered around `scan_origin` and divided into circular bins per angle_increment respectively. +At this time, each point belonging to each bin is stored as range data. In addition, the x,y information in the map coordinate is also stored for ray-tracing on the map coordinate. The bin contains the following information for each point - range data from origin of raytrace @@ -30,7 +30,7 @@ The ray trace is done by Bresenham's line algorithm. ![pointcloud_based_occupancy_grid_map_side_view_1st](./image/pointcloud_based_occupancy_grid_map_side_view_1st.svg) 2. Fill in the unknown cells.
- Based on the assumption that `UNKNOWN` is behind the obstacle, the cells that are more than a distance margin from each obstacle point are filled with `UNKOWN` + Based on the assumption that `UNKNOWN` is behind the obstacle, the cells that are more than a distance margin from each obstacle point are filled with `UNKNOWN` ![pointcloud_based_occupancy_grid_map_side_view_2nd](./image/pointcloud_based_occupancy_grid_map_side_view_2nd.svg) diff --git a/perception/probabilistic_occupancy_grid_map/src/pointcloud_based_occupancy_grid_map/occupancy_grid_map_fixed.cpp b/perception/probabilistic_occupancy_grid_map/src/pointcloud_based_occupancy_grid_map/occupancy_grid_map_fixed.cpp index cf861ffddfd08..b07076de8342a 100644 --- a/perception/probabilistic_occupancy_grid_map/src/pointcloud_based_occupancy_grid_map/occupancy_grid_map_fixed.cpp +++ b/perception/probabilistic_occupancy_grid_map/src/pointcloud_based_occupancy_grid_map/occupancy_grid_map_fixed.cpp @@ -141,7 +141,7 @@ void OccupancyGridMapFixedBlindSpot::updateWithPointCloud( occupancy_cost_value::FREE_SPACE); } - // Second step: Add uknown cell + // Second step: Add unknown cell for (size_t bin_index = 0; bin_index < obstacle_pointcloud_angle_bins.size(); ++bin_index) { auto & obstacle_pointcloud_angle_bin = obstacle_pointcloud_angle_bins.at(bin_index); auto & raw_pointcloud_angle_bin = raw_pointcloud_angle_bins.at(bin_index); diff --git a/perception/probabilistic_occupancy_grid_map/src/pointcloud_based_occupancy_grid_map/occupancy_grid_map_projective.cpp b/perception/probabilistic_occupancy_grid_map/src/pointcloud_based_occupancy_grid_map/occupancy_grid_map_projective.cpp index e2282f7c711c8..9556b0a93cc93 100644 --- a/perception/probabilistic_occupancy_grid_map/src/pointcloud_based_occupancy_grid_map/occupancy_grid_map_projective.cpp +++ b/perception/probabilistic_occupancy_grid_map/src/pointcloud_based_occupancy_grid_map/occupancy_grid_map_projective.cpp @@ -202,7 +202,7 @@ void OccupancyGridMapProjectiveBlindSpot::updateWithPointCloud( if (pub_debug_grid_) converter.addLayerFromCostmap2D(*this, "filled_free_to_farthest", debug_grid_); - // Second step: Add uknown cell + // Second step: Add unknown cell for (size_t bin_index = 0; bin_index < obstacle_pointcloud_angle_bins.size(); ++bin_index) { const auto & obstacle_pointcloud_angle_bin = obstacle_pointcloud_angle_bins.at(bin_index); const auto & raw_pointcloud_angle_bin = raw_pointcloud_angle_bins.at(bin_index); diff --git a/perception/radar_fusion_to_detected_object/docs/radar_fusion_to_detected_object_2.drawio.svg b/perception/radar_fusion_to_detected_object/docs/radar_fusion_to_detected_object_2.drawio.svg index 13c6b848da61c..4e833473a79ea 100644 --- a/perception/radar_fusion_to_detected_object/docs/radar_fusion_to_detected_object_2.drawio.svg +++ b/perception/radar_fusion_to_detected_object/docs/radar_fusion_to_detected_object_2.drawio.svg @@ -23,8 +23,8 @@
- Twist o - bserved from radars + Twist + observed from radars
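The pointcloud-based-occupancy-grid-map.md hunk above states that the first step marks every cell between `scan_origin` and the closest obstacle in each angle bin as `FREE_SPACE`, and that the ray trace is done by Bresenham's line algorithm. A minimal sketch of that ray trace follows for context; the `Grid` type, the cost values, and the `traceFree` name are illustrative assumptions, not the package's actual API.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <vector>

// Illustrative cost values; the package defines its own in occupancy_cost_value.
constexpr uint8_t FREE_SPACE = 0;
constexpr uint8_t UNKNOWN = 128;

// Hypothetical row-major grid whose cells start out as UNKNOWN.
struct Grid
{
  int width{0};
  int height{0};
  std::vector<uint8_t> cells;
  void set(int x, int y, uint8_t v) { cells[static_cast<std::size_t>(y) * width + x] = v; }
};

// Mark every cell on the segment from the scan origin (x0, y0) to the first
// obstacle (x1, y1) as FREE_SPACE, stepping one cell at a time with
// Bresenham's integer line algorithm (no floating-point error accumulation).
void traceFree(Grid & grid, int x0, int y0, int x1, int y1)
{
  const int dx = std::abs(x1 - x0);
  const int dy = -std::abs(y1 - y0);
  const int sx = x0 < x1 ? 1 : -1;
  const int sy = y0 < y1 ? 1 : -1;
  int err = dx + dy;
  while (true) {
    grid.set(x0, y0, FREE_SPACE);
    if (x0 == x1 && y0 == y1) {
      break;
    }
    const int e2 = 2 * err;
    if (e2 >= dy) {
      err += dy;
      x0 += sx;
    }
    if (e2 <= dx) {
      err += dx;
      y0 += sy;
    }
  }
}
```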
diff --git a/perception/radar_object_clustering/docs/clustering.drawio.svg b/perception/radar_object_clustering/docs/clustering.drawio.svg index 64563a5f28ecd..e50efdbe371a5 100644 --- a/perception/radar_object_clustering/docs/clustering.drawio.svg +++ b/perception/radar_object_clustering/docs/clustering.drawio.svg @@ -6,7 +6,7 @@ width="769px" height="360px" viewBox="-0.5 -0.5 769 360" - content="<mxfile><diagram id="iXHs5RmmCZOnBjEk0gil" name="Page-1">7VvLsqM2EP0aL0PpgXgs72M82aQqVTdVSVYpBoRNgsEB+V47Xx9hJBtJYGMMHs+MvbiDGknA6XOk7oaZ4ZfV9nMRrJe/5BFNZwhE2xl+nSEECeR/K8OuNrgI1IZFkUSiz9HwlvxHhVF22yQRLZWOLM9TlqxVY5hnGQ2ZYguKIv9Qu8V5ql51HSyoYXgLg9S0/p5EbFlbPQKO9p9psljKK0MgzqwC2VkYymUQ5R8NE/40wy9FnrP6aLV9oWmFncSlHjfvOHu4sYJmrM8AVA94D9KNeDZxX2wnH5YP4LjyxvPHMmH0bR2E1ZkP7lluW7JVyluQHwblugY7TraUz/8cJ2n6kqd5wW1ZnlVTlKzI/6HSOEN4Pgdgzu/u2bxz8TDvtGB02zCJJ/lM8xVlxY53kWc9ux4iaIVty/Fry8fRTcj3LFfMvWx4qWkPBEEWh2scMeQHAsZ2SPF5SLnX19VhnNLtU8VH/vw0i8Tha5gGZZmEKroGcs7+dzgjiUgq4POMCdVAR7QvwpxGigBMxBt4StY3sZS2gqYBS95V2bSBK67wa57wOzk4lCBggcYPKu5FUlVywjLfFCEVczRJr0/rk0umZUGxoMyYds+DAyS9qGGfp0aRb7KoUs8rOK+4Imcc3jzjTRcYTEA9JViRYSwJEoIt7Ch4QkJMDbaRxibXi4/0WM96CU2CaGsgcrxiEGAQDEPyToTluB53U5cCoDdQWA45OS3SHDyesJzvXlgYqnsbdIjlmMKaSlfuQ1d9dIWxZ6Hm1kI0YdmqPvAwnWF4+jLIP3mZ8XTnTac7jwzWXUUWAMbRHdJiShfcTHP+V9XceRDvRXPAVzzka9D3FhXQsofJwkCZxk4hG2f4djWfe2As2WCELEL8409doZDjWMQ2T99AVRCeB39SWZ0D+V5kZTuW29hEkOpAj1gy6rpYaEhLuGw0mdL61DfanF3ye2CX5eOHrJo7tNj9IbrtG39WMraIbL5uhazr1k60OhlR4yHAui+SYHwyjxi6FHtmFoknSx5gW8XGSVnl5HWQKWxx/t1Upbl9ReWncl9ieeIdIFlv9x6U5/nRQvybyv6zqiAp6XLsiOMYgDg2x0YJZ2EW0r/YsqDlsipSitn4E9UTqhf5UugW3rF+BGnWuM9Xc3aK1mKraNk9gjRZVPtNyMlKuf252huSMEifxIlVEkVpV9VQ3d1G2G9s0Kvu4LRIQd/nB20qPSo7l1ZPuwt+zfKeLPe19jPdNgLUsnQtYXaRATN0PRPng/EqoB8Fnl4LM3TUYBb52h7bdymGSI2uoT7RiOtwjyrOIzLb+/ZU9n+Fq7E6EZouAxqtnnTN+nj/jla1h5yh8dTpbAyS6Rzdo0L0cDR0tdXaHViAx9pqjch0UXOP8hQrkiBbdEaBZ8POAXGOSMT2C3mzLEJGesWMgeU3Eh6gOw5anlhFFepYPjbZ41puN1P6RkSorZh04+ylemOqj60dj0CUxDEHnPv9kbx0kkrbeH3zqwWnJaIeI3NBQ8thAyokmEuTP+pZYR8EfPfrNlRzocMb/otDL728Jtvjr9vSvz0dLhQTBeVyT3l4Kt5G1+zW38xXKXotGwx1OtbfDvqTOb3HR0kPp5+UOtCTI3eg121tIjid1NuqUjcMDTAPDqqtTR/7TtM8TNjuER30iQ60subh87RbBAdt1TbF7+cJ5BkEaieLWDEMshRBFHDng/zL31V9tIsm36v3kaeGGNUbF9sgAHRcy7YnIkFbWe7GJAjTTcm9wYH9YYngoK9OBLNo91uFbgfkrTgboLYA3/DV1ZAb+LZ4oRtyDXHSsvKO80KJN4/f/Nc7/fE/TuBP/wM=</diagram></mxfile>" + content="<mxfile><diagram id="iXHs5RmmCZOnBjEk0gil" 
name="Page-1">7VtNk6M2EP01PobSBxJwnI/15pKqVE2qkpxSLMg2CQYH5Bk7vz6SkWwksI0xeJyNfZhFjSTg9XtSd8NO8Mty87UIV4uf8pilEwTizQS/ThAilIq/0rCtDBR6lWFeJHFlggfDW/IPU0agrOskZqXRked5ypOVaYzyLGMRN2xhUeQfZrdZnppXXYVz1jC8RWHatP6axHxRWX0CDvYfWTJf6CtDoM4sQ91ZGcpFGOcfNRP+MsEvRZ7z6mi5eWGpxE7jUo2bHjm7v7GCZbzLAFQNeA/TtXo2dV98qx9WDBC4isbzxyLh7G0VRvLMh/CssC34MhUtKA7DclWBPUs2TMz/PEvS9CVP80LYsjyTU5S8yP9i2jhBeDoFYCru7rl55+ph3lnB2aZmUk/yleVLxout6KLP+m41RNEKuw4NKsvHwU0o8B1Pzb2oealuDxVB5vtrHDAUBwrGdkjxeUiF11fycJayzZPko3h+lsXq8DVKw7JMIhPdBnJ099uf0UQkEvg840o1kKr2RZiz2BBAE/Eanpr1dSy1rWBpyJN3UzZt4Kor/Jwn4k72DiUIOKD2g4Z7kVaVnrDM10XE1Bx10tvTBuSSaXlYzBlvTLvjwR6STtRwz1OjyNdZLNXzCs4rrsi5gDfPRNMDDSagjhKUZBhKgoRgB1MDT0hIU4NtpHHJ9eIjHdazTkLTILoWiAKvGQgxCPsheSfCop4v3HRMAdDvKSxKTk6LLAcPJyz63QsLQ3Nvg5Q4tCmssXTlPXTVRVcY+w6qby3EEpZr6gP30xmGpy+DgpOXGU53/ni680lv3UmyADCM7pAVU3rgZpoLPlVz50G8F82BwPBQYEHfWVTAyh5GCwN1GjuGbGj/7Wo69cFQssEIOYQEh5+5QiFKHeI2T99AVRCeB39UWZ0D+V5k5VLHq20iyHSgTxwddV0sNGQlXC4aTWld6httzi7FPfDL8vF9Vi0cWmx/U912jd+ljB2im68bJeuqtVWto4yo8FBg3RdJMD6ZR/Rdiv1mFolHSx5gW8WGplw6eRVmBlvo32tZmttVVH4odyWWJ9EBktVm50F9XhzN1b+p7j+RBUlNl0NHPJsBMJs1x8aJYGEWsT/4omDlQhYp1WziiaoJzYt8K2yL6Fg9gjZb3BerOT9Fa7VVtOweYZrM5X4TCbIyYX+We0MShemTOrFM4jg9VjU0d7cB9hsXdKo70BYp2Pt8r02lQ2Xn0urp8YJfvbyny32t/ZpuGwBqXbrWMHuoATP0/CbOe+NVQD8KPJ0WZkjNYBYF1h7bdSmGyIyuoT3RgOtwhyrOIzLb+fZU9n+Fq7E5ERovAxqsnnTN+nj/jja1h2jfeOp0NgbJeI7uUCF6OBp61mrt9SzAY2u1RmS8qLlDeYoXSZjNj0aBZ8POHnGOSsR2C3m9LEIGesWMgRPUEh5gOw46vlpFDeo4AW6yx3O840zpGhGhtmLSjbMX+cbUHls5HoE4mc2YQFw4/pG9HGWVtfMGzc8WaEtIPUTqgvrWw3qUSLDQpnjUs8reK/juF25oJkP7V/wXx152fU23h1+4tX87OlwpJg7LxY7y8FTAja7Zrv8zn6XYxWzQ1+nYfj0YjOb0Dl8lPZx+UurAzo68nl53rYngeFJvK0vdMDbAIjqQW5s99p2leZTw7SM86PYyzSxs7j9Qu0V00FZvMxx/nkF+g0HtbFFLRoMtRRiHwvkg//anrJAeo8n36n3kmzGGfOfiNggAqee47kgkaCvM3ZgEUbouhTcEsP9bIlD06URolu1+kegegbwV5waoLcDXfPXJkFuIk5aVd5hXSqJ5+Oq/2uoP/3UCf/kX</diagram></mxfile>" > @@ -77,14 +77,14 @@
- angle diffrence + angle difference
- angle diffr... + angle diffe... @@ -100,7 +100,7 @@
- velocity diffrence + velocity difference
diff --git a/perception/tensorrt_classifier/include/tensorrt_classifier/calibrator.hpp b/perception/tensorrt_classifier/include/tensorrt_classifier/calibrator.hpp index e8c03d132679a..b4d687c3c4fd9 100644 --- a/perception/tensorrt_classifier/include/tensorrt_classifier/calibrator.hpp +++ b/perception/tensorrt_classifier/include/tensorrt_classifier/calibrator.hpp @@ -178,8 +178,8 @@ class ImageStream /**Percentile calibration using legacy calibrator*/ /** * @class Int8LegacyCalibrator - * @brief Calibrator for Percentle - * @warning We are confirming bug on Tegra like Xavier and Orin. We recommand use EntropyV2 or + * @brief Calibrator for Percentile + * @warning We are confirming bug on Tegra like Xavier and Orin. We recommend using the EntropyV2 or * MinMax calibrator */ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator @@ -193,7 +193,7 @@ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator double quantile = 0.999999, double cutoff = 0.999999) : stream_(stream), calibration_cache_file_(calibration_cache_file), - histogranm_cache_file_(histogram_cache_file), + histogram_cache_file_(histogram_cache_file), read_cache_(read_cache) { auto d = stream_.getInputDims(); @@ -206,16 +206,16 @@ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator auto algType = getAlgorithm(); switch (algType) { case (nvinfer1::CalibrationAlgoType::kLEGACY_CALIBRATION): - std::cout << "CalibratioAlgoType : kLEGACY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kLEGACY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION_2): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION_2" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION_2" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kMINMAX_CALIBRATION): - std::cout << "CalibratioAlgoType : kMINMAX_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kMINMAX_CALIBRATION" << std::endl; break; default: std::cout << "No CalibrationAlgType" << std::endl; @@ -285,7 +285,7 @@ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator const void * readHistogramCache(std::size_t & length) noexcept { hist_cache_.clear(); - std::ifstream input(histogranm_cache_file_, std::ios::binary); + std::ifstream input(histogram_cache_file_, std::ios::binary); input >> std::noskipws; if (read_cache_ && input.good()) { std::copy( @@ -303,14 +303,14 @@ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator } void writeHistogramCache(void const * ptr, std::size_t length) noexcept { - std::ofstream output(histogranm_cache_file_, std::ios::binary); + std::ofstream output(histogram_cache_file_, std::ios::binary); output.write(reinterpret_cast(ptr), length); } private: ImageStream stream_; const std::string calibration_cache_file_; - const std::string histogranm_cache_file_; + const std::string histogram_cache_file_; bool read_cache_{true}; size_t input_count_; void * device_input_{nullptr}; @@ -326,7 +326,7 @@ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator /** * @class Int8LegacyCalibrator - * @brief Calibrator for Percentle + * @brief Calibrator for Percentile * */ class Int8EntropyCalibrator : public nvinfer1::IInt8EntropyCalibrator2 @@ -347,16 +347,16 @@ class Int8EntropyCalibrator : public
nvinfer1::IInt8EntropyCalibrator2 auto algType = getAlgorithm(); switch (algType) { case (nvinfer1::CalibrationAlgoType::kLEGACY_CALIBRATION): - std::cout << "CalibratioAlgoType : kLEGACY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kLEGACY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION_2): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION_2" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION_2" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kMINMAX_CALIBRATION): - std::cout << "CalibratioAlgoType : kMINMAX_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kMINMAX_CALIBRATION" << std::endl; break; default: std::cout << "No CalibrationAlgType" << std::endl; @@ -447,16 +447,16 @@ class Int8MinMaxCalibrator : public nvinfer1::IInt8MinMaxCalibrator auto algType = getAlgorithm(); switch (algType) { case (nvinfer1::CalibrationAlgoType::kLEGACY_CALIBRATION): - std::cout << "CalibratioAlgoType : kLEGACY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kLEGACY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION_2): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION_2" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION_2" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kMINMAX_CALIBRATION): - std::cout << "CalibratioAlgoType : kMINMAX_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kMINMAX_CALIBRATION" << std::endl; break; default: std::cout << "No CalibrationAlgType" << std::endl; diff --git a/perception/tensorrt_classifier/include/tensorrt_classifier/preprocess.h b/perception/tensorrt_classifier/include/tensorrt_classifier/preprocess.h index febf909b8826e..55295a57ad5b8 100644 --- a/perception/tensorrt_classifier/include/tensorrt_classifier/preprocess.h +++ b/perception/tensorrt_classifier/include/tensorrt_classifier/preprocess.h @@ -47,11 +47,11 @@ extern void resize_bilinear_gpu( /** * @brief Letterbox a image on gpus - * @param[out] dst letterboxed image + * @param[out] dst letterbox-ed image * @param[in] src image - * @param[in] d_w width for letterboxing - * @param[in] d_h height foletterboxing - * @param[in] d_c channel foletterboxing + * @param[in] d_w width for letterbox-ing + * @param[in] d_h height for letterbox-ing + * @param[in] d_c channel for letterbox-ing * @param[in] s_w width for input image * @param[in] s_h height for input image * @param[in] s_c channel for input image @@ -87,7 +87,7 @@ extern void toFloat_gpu( /** * @brief Resize and letterbox a image using bilinear interpolation on gpus - * @param[out] dst processsed image + * @param[out] dst processed image * @param[in] src image * @param[in] d_w width for output * @param[in] d_h height for output @@ -104,7 +104,7 @@ extern void resize_bilinear_letterbox_gpu( /** * @brief Optimized preprocessing including resize, letterbox, nhwc2nchw, toFloat and normalization * for YOLOX on gpus - * @param[out] dst processsed image + * @param[out] dst processed image * @param[in] src image * 
@param[in] d_w width for output * @param[in] d_h height for output @@ -122,7 +122,7 @@ extern void resize_bilinear_letterbox_nhwc_to_nchw32_gpu( /** * @brief Optimized preprocessing including resize, letterbox, nhwc2nchw, toFloat and normalization * with batching for YOLOX on gpus - * @param[out] dst processsed image + * @param[out] dst processed image * @param[in] src image * @param[in] d_w width for output * @param[in] d_h height for output @@ -141,7 +141,7 @@ extern void resize_bilinear_letterbox_nhwc_to_nchw32_batch_gpu( /** * @brief Optimized preprocessing including crop, resize, letterbox, nhwc2nchw, toFloat and * normalization with batching for YOLOX on gpus - * @param[out] dst processsed image + * @param[out] dst processed image * @param[in] src image * @param[in] d_w width for output * @param[in] d_h height for output @@ -161,7 +161,7 @@ extern void crop_resize_bilinear_letterbox_nhwc_to_nchw32_batch_gpu( /** * @brief Optimized multi-scale preprocessing including crop, resize, letterbox, nhwc2nchw, toFloat * and normalization with batching for YOLOX on gpus - * @param[out] dst processsed image + * @param[out] dst processed image * @param[in] src image * @param[in] d_w width for output * @param[in] d_h height for output diff --git a/perception/tensorrt_classifier/include/tensorrt_classifier/tensorrt_classifier.hpp b/perception/tensorrt_classifier/include/tensorrt_classifier/tensorrt_classifier.hpp index 7e2c852d369fb..d95ee52e8fae9 100644 --- a/perception/tensorrt_classifier/include/tensorrt_classifier/tensorrt_classifier.hpp +++ b/perception/tensorrt_classifier/include/tensorrt_classifier/tensorrt_classifier.hpp @@ -72,7 +72,8 @@ class TrtClassifier * @param[in] images batched images */ bool doInference( - const std::vector & images, std::vector & results, std::vector & probs); + const std::vector & images, std::vector & results, + std::vector & probabilities); /** * @brief allocate buffer for preprocess on GPU @@ -85,7 +86,7 @@ class TrtClassifier private: /** - * @brief run preprcess including resizing, letterbox, BGR2RGB, NHWC2NCHW and toFloat on CPU + * @brief run preprocess including resizing, letterbox, BGR2RGB, NHWC2NCHW and toFloat on CPU * @param[in] images batching images */ void preprocess_opt(const std::vector & images); @@ -98,7 +99,8 @@ class TrtClassifier void preprocessGpu(const std::vector & images); bool feedforwardAndDecode( - const std::vector & images, std::vector & results, std::vector & probs); + const std::vector & images, std::vector & results, + std::vector & probabilities); std::unique_ptr trt_common_; @@ -116,11 +118,11 @@ class TrtClassifier // std for preprocessing std::vector std_; std::vector inv_std_; - // flg for preprecessing on GPU + // flg for preprocessing on GPU bool m_cuda; - // host buffer for preprecessing on GPU + // host buffer for preprocessing on GPU unsigned char * h_img_; - // device buffer for preprecessing on GPU + // device buffer for preprocessing on GPU unsigned char * d_img_; int src_width_; int src_height_; diff --git a/perception/tensorrt_classifier/src/tensorrt_classifier.cpp b/perception/tensorrt_classifier/src/tensorrt_classifier.cpp index e9bde8cc4b4f8..b7c4ce99fa2e9 100644 --- a/perception/tensorrt_classifier/src/tensorrt_classifier.cpp +++ b/perception/tensorrt_classifier/src/tensorrt_classifier.cpp @@ -198,7 +198,7 @@ TrtClassifier::~TrtClassifier() void TrtClassifier::initPreprocessBuffer(int width, int height) { - // if size of source input has benn changed... + // if size of source input has been changed... 
if (src_width_ != -1 || src_height_ != -1) { if (width != src_width_ || height != src_height_) { // Free cuda memory to reallocate @@ -243,7 +243,7 @@ void TrtClassifier::preprocessGpu(const std::vector & images) input_dims.d[0] = batch_size; for (const auto & image : images) { - // if size of source input has benn changed... + // if size of source input has been changed... int width = image.cols; int height = image.rows; if (src_width_ != -1 || src_height_ != -1) { @@ -342,21 +342,23 @@ void TrtClassifier::preprocess_opt(const std::vector & images) } bool TrtClassifier::doInference( - const std::vector & images, std::vector & results, std::vector & probs) + const std::vector & images, std::vector & results, + std::vector & probabilities) { if (!trt_common_->isInitialized()) { return false; } preprocess_opt(images); - return feedforwardAndDecode(images, results, probs); + return feedforwardAndDecode(images, results, probabilities); } bool TrtClassifier::feedforwardAndDecode( - const std::vector & images, std::vector & results, std::vector & probs) + const std::vector & images, std::vector & results, + std::vector & probabilities) { results.clear(); - probs.clear(); + probabilities.clear(); std::vector buffers = {input_d_.get(), out_prob_d_.get()}; trt_common_->enqueueV2(buffers.data(), *stream_, nullptr); @@ -377,7 +379,7 @@ bool TrtClassifier::feedforwardAndDecode( index = j; } } - probs.push_back(max); + probabilities.push_back(max); results.push_back(index); } return true; diff --git a/perception/tensorrt_yolo/src/nodelet.cpp b/perception/tensorrt_yolo/src/nodelet.cpp index ef5bd501286f5..dcb65114ad88f 100644 --- a/perception/tensorrt_yolo/src/nodelet.cpp +++ b/perception/tensorrt_yolo/src/nodelet.cpp @@ -28,13 +28,13 @@ namespace { std::vector getFilePath(const std::string & input_dir) { - glob_t globbuf; + glob_t glob_buffer; std::vector files; - glob((input_dir + "*").c_str(), 0, NULL, &globbuf); - for (size_t i = 0; i < globbuf.gl_pathc; i++) { - files.push_back(globbuf.gl_pathv[i]); + glob((input_dir + "*").c_str(), 0, NULL, &glob_buffer); + for (size_t i = 0; i < glob_buffer.gl_pathc; i++) { + files.push_back(glob_buffer.gl_pathv[i]); } - globfree(&globbuf); + globfree(&glob_buffer); return files; } } // namespace diff --git a/perception/tensorrt_yolox/include/tensorrt_yolox/calibrator.hpp b/perception/tensorrt_yolox/include/tensorrt_yolox/calibrator.hpp index d859cb5c9c9c5..be7d0f188ff9e 100644 --- a/perception/tensorrt_yolox/include/tensorrt_yolox/calibrator.hpp +++ b/perception/tensorrt_yolox/include/tensorrt_yolox/calibrator.hpp @@ -160,8 +160,8 @@ class ImageStream /**Percentile calibration using legacy calibrator*/ /** * @class Int8LegacyCalibrator - * @brief Calibrator for Percentle - * @warning We are confirming bug on Tegra like Xavier and Orin. We recommand use MinMax calibrator + * @brief Calibrator for Percentile + * @warning We are confirming bug on Tegra like Xavier and Orin. 
We recommend using the MinMax calibrator */ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator { @@ -172,7 +172,7 @@ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator double quantile = 0.999999, double cutoff = 0.999999) : stream_(stream), calibration_cache_file_(calibration_cache_file), - histogranm_cache_file_(histogram_cache_file), + histogram_cache_file_(histogram_cache_file), read_cache_(read_cache) { auto d = stream_.getInputDims(); @@ -184,16 +184,16 @@ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator auto algType = getAlgorithm(); switch (algType) { case (nvinfer1::CalibrationAlgoType::kLEGACY_CALIBRATION): - std::cout << "CalibratioAlgoType : kLEGACY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kLEGACY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION_2): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION_2" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION_2" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kMINMAX_CALIBRATION): - std::cout << "CalibratioAlgoType : kMINMAX_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kMINMAX_CALIBRATION" << std::endl; break; default: std::cout << "No CalibrationAlgType" << std::endl; @@ -263,7 +263,7 @@ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator const void * readHistogramCache(std::size_t & length) noexcept { hist_cache_.clear(); - std::ifstream input(histogranm_cache_file_, std::ios::binary); + std::ifstream input(histogram_cache_file_, std::ios::binary); input >> std::noskipws; if (read_cache_ && input.good()) { std::copy( @@ -281,14 +281,14 @@ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator } void writeHistogramCache(void const * ptr, std::size_t length) noexcept { - std::ofstream output(histogranm_cache_file_, std::ios::binary); + std::ofstream output(histogram_cache_file_, std::ios::binary); output.write(reinterpret_cast(ptr), length); } private: ImageStream stream_; const std::string calibration_cache_file_; - const std::string histogranm_cache_file_; + const std::string histogram_cache_file_; bool read_cache_{true}; size_t input_count_; void * device_input_{nullptr}; @@ -301,7 +301,7 @@ class Int8LegacyCalibrator : public nvinfer1::IInt8LegacyCalibrator /** * @class Int8LegacyCalibrator - * @brief Calibrator for Percentle + * @brief Calibrator for Percentile * @warning This calibrator causes crucial accuracy drop for YOLOX.
*/ class Int8EntropyCalibrator : public nvinfer1::IInt8EntropyCalibrator2 @@ -319,16 +319,16 @@ class Int8EntropyCalibrator : public nvinfer1::IInt8EntropyCalibrator2 auto algType = getAlgorithm(); switch (algType) { case (nvinfer1::CalibrationAlgoType::kLEGACY_CALIBRATION): - std::cout << "CalibratioAlgoType : kLEGACY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kLEGACY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION_2): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION_2" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION_2" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kMINMAX_CALIBRATION): - std::cout << "CalibratioAlgoType : kMINMAX_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kMINMAX_CALIBRATION" << std::endl; break; default: std::cout << "No CalibrationAlgType" << std::endl; @@ -397,7 +397,7 @@ class Int8EntropyCalibrator : public nvinfer1::IInt8EntropyCalibrator2 /** * @class Int8MinMaxCalibrator * @brief Calibrator for MinMax - * @warning We strongly recommand MinMax calibrator for YOLOX + * @warning We strongly recommend the MinMax calibrator for YOLOX */ class Int8MinMaxCalibrator : public nvinfer1::IInt8MinMaxCalibrator { @@ -414,16 +414,16 @@ class Int8MinMaxCalibrator : public nvinfer1::IInt8MinMaxCalibrator auto algType = getAlgorithm(); switch (algType) { case (nvinfer1::CalibrationAlgoType::kLEGACY_CALIBRATION): - std::cout << "CalibratioAlgoType : kLEGACY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kLEGACY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kENTROPY_CALIBRATION_2): - std::cout << "CalibratioAlgoType : kENTROPY_CALIBRATION_2" << std::endl; + std::cout << "CalibrationAlgoType : kENTROPY_CALIBRATION_2" << std::endl; break; case (nvinfer1::CalibrationAlgoType::kMINMAX_CALIBRATION): - std::cout << "CalibratioAlgoType : kMINMAX_CALIBRATION" << std::endl; + std::cout << "CalibrationAlgoType : kMINMAX_CALIBRATION" << std::endl; break; default: std::cout << "No CalibrationAlgType" << std::endl; diff --git a/perception/tensorrt_yolox/include/tensorrt_yolox/preprocess.hpp b/perception/tensorrt_yolox/include/tensorrt_yolox/preprocess.hpp index 830cf184e10e8..3549ae35e70ea 100644 --- a/perception/tensorrt_yolox/include/tensorrt_yolox/preprocess.hpp +++ b/perception/tensorrt_yolox/include/tensorrt_yolox/preprocess.hpp @@ -49,11 +49,11 @@ extern void resize_bilinear_gpu( /** * @brief Letterbox a image on gpus - * @param[out] dst letterboxed image + * @param[out] dst letterbox-ed image * @param[in] src image - * @param[in] d_w width for letterboxing - * @param[in] d_h height foletterboxing - * @param[in] d_c channel foletterboxing + * @param[in] d_w width for letterbox-ing + * @param[in] d_h height for letterbox-ing + * @param[in] d_c channel for letterbox-ing * @param[in] s_w width for input image * @param[in] s_h height for input image * @param[in] s_c channel for input image @@ -89,7 +89,7 @@ extern void to_float_gpu( /** * @brief Resize and letterbox a image using bilinear interpolation
on gpus - * @param[out] dst processsed image + * @param[out] dst processed image * @param[in] src image * @param[in] d_w width for output * @param[in] d_h height for output @@ -106,7 +106,7 @@ extern void resize_bilinear_letterbox_gpu( /** * @brief Optimized preprocessing including resize, letterbox, nhwc2nchw, toFloat and normalization * for YOLOX on gpus - * @param[out] dst processsed image + * @param[out] dst processed image * @param[in] src image * @param[in] d_w width for output * @param[in] d_h height for output @@ -124,7 +124,7 @@ extern void resize_bilinear_letterbox_nhwc_to_nchw32_gpu( /** * @brief Optimized preprocessing including resize, letterbox, nhwc2nchw, toFloat and normalization * with batching for YOLOX on gpus - * @param[out] dst processsed image + * @param[out] dst processed image * @param[in] src image * @param[in] d_w width for output * @param[in] d_h height for output @@ -143,7 +143,7 @@ extern void resize_bilinear_letterbox_nhwc_to_nchw32_batch_gpu( /** * @brief Optimized preprocessing including crop, resize, letterbox, nhwc2nchw, toFloat and * normalization with batching for YOLOX on gpus - * @param[out] dst processsed image + * @param[out] dst processed image * @param[in] src image * @param[in] d_w width for output * @param[in] d_h height for output @@ -163,7 +163,7 @@ extern void crop_resize_bilinear_letterbox_nhwc_to_nchw32_batch_gpu( /** * @brief Optimized multi-scale preprocessing including crop, resize, letterbox, nhwc2nchw, toFloat * and normalization with batching for YOLOX on gpus - * @param[out] dst processsed image + * @param[out] dst processed image * @param[in] src image * @param[in] d_w width for output * @param[in] d_h height for output diff --git a/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox.hpp b/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox.hpp index fd50e67143d18..c42222a70c96b 100644 --- a/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox.hpp +++ b/perception/tensorrt_yolox/include/tensorrt_yolox/tensorrt_yolox.hpp @@ -132,7 +132,7 @@ class TrtYoloX private: /** - * @brief run preprcess including resizing, letterbox, NHWC2NCHW and toFloat on CPU + * @brief run preprocess including resizing, letterbox, NHWC2NCHW and toFloat on CPU * @param[in] images batching images */ void preprocess(const std::vector & images); @@ -144,7 +144,7 @@ class TrtYoloX void preprocessGpu(const std::vector & images); /** - * @brief run preprcess including resizing, letterbox, NHWC2NCHW and toFloat on CPU + * @brief run preprocess including resizing, letterbox, NHWC2NCHW and toFloat on CPU * @param[in] images batching images * @param[in] rois region of interest */ @@ -159,18 +159,18 @@ class TrtYoloX const std::vector & images, const std::vector & rois); /** - * @brief run multi-scale preprcess including resizing, letterbox, NHWC2NCHW and toFloat on CPU + * @brief run multi-scale preprocess including resizing, letterbox, NHWC2NCHW and toFloat on CPU * @param[in] images batching images * @param[in] rois region of interest */ void multiScalePreprocess(const cv::Mat & image, const std::vector & rois); /** - * @brief run multi-scale preprcess including resizing, letterbox, NHWC2NCHW and toFloat on GPU + * @brief run multi-scale preprocess including resizing, letterbox, NHWC2NCHW and toFloat on GPU * @param[in] images batching images * @param[in] rois region of interest */ - void multiScalepreprocessGpu(const cv::Mat & image, const std::vector & rois); + void multiScalePreprocessGpu(const cv::Mat & image, const std::vector 
& rois); bool multiScaleFeedforward(const cv::Mat & image, int batch_size, ObjectArrays & objects); bool multiScaleFeedforwardAndDecode( diff --git a/perception/tensorrt_yolox/src/tensorrt_yolox.cpp b/perception/tensorrt_yolox/src/tensorrt_yolox.cpp index 32d905eed2870..ea7857c3768aa 100644 --- a/perception/tensorrt_yolox/src/tensorrt_yolox.cpp +++ b/perception/tensorrt_yolox/src/tensorrt_yolox.cpp @@ -433,7 +433,7 @@ void TrtYoloX::preprocessWithRoiGpu( input_dims.d[0] = batch_size; for (const auto & image : images) { - // if size of source input has benn changed... + // if size of source input has been changed... int width = image.cols; int height = image.rows; if (src_width_ != -1 || src_height_ != -1) { @@ -551,14 +551,14 @@ void TrtYoloX::preprocessWithRoi( // No Need for Sync } -void TrtYoloX::multiScalepreprocessGpu(const cv::Mat & image, const std::vector & rois) +void TrtYoloX::multiScalePreprocessGpu(const cv::Mat & image, const std::vector & rois) { const auto batch_size = rois.size(); auto input_dims = trt_common_->getBindingDimensions(0); input_dims.d[0] = batch_size; - // if size of source input has benn changed... + // if size of source input has been changed... int width = image.cols; int height = image.rows; if (src_width_ != -1 || src_height_ != -1) { @@ -683,7 +683,7 @@ bool TrtYoloX::doMultiScaleInference( return false; } if (use_gpu_preprocess_) { - multiScalepreprocessGpu(image, rois); + multiScalePreprocessGpu(image, rois); } else { multiScalePreprocess(image, rois); } diff --git a/perception/traffic_light_classifier/CMakeLists.txt b/perception/traffic_light_classifier/CMakeLists.txt index c4b62a932dcfd..72dba86a00d33 100644 --- a/perception/traffic_light_classifier/CMakeLists.txt +++ b/perception/traffic_light_classifier/CMakeLists.txt @@ -65,6 +65,7 @@ else() set(CUDNN_AVAIL OFF) endif() +# cspell: ignore EFFICIENTNET, MOBILENET # Download caffemodel and prototxt set(EFFICIENTNET_BATCH_1_HASH 82baba4fcf1abe0c040cd55005e34510) set(EFFICIENTNET_BATCH_4_HASH 21b549c2fe4fbb20d32cc019e6d70cd7) diff --git a/perception/traffic_light_classifier/README.md b/perception/traffic_light_classifier/README.md index e2faf1633d331..53ba0d3be7ba2 100644 --- a/perception/traffic_light_classifier/README.md +++ b/perception/traffic_light_classifier/README.md @@ -8,7 +8,7 @@ traffic_light_classifier is a package for classifying traffic light labels using ### cnn_classifier -Traffic light labels are classified by EfficientNet-b1 or MobiletNet-v2. +Traffic light labels are classified by EfficientNet-b1 or MobileNet-v2. Totally 83400 (58600 for training, 14800 for evaluation and 10000 for test) TIER IV internal images of Japanese traffic lights were used for fine-tuning. The information of the models is listed here: @@ -188,11 +188,13 @@ Example: ... --> + + ## References/External links [1] M. Sandler, A. Howard, M. Zhu, A. Zhmoginov and L. Chen, "MobileNetV2: Inverted Residuals and Linear Bottlenecks," 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Salt Lake City, UT, 2018, pp. 4510-4520, doi: 10.1109/CVPR.2018.00474. -[2] Tan, Mingxing, and Quoc Le. "Efficientnet: Rethinking model scaling for convolutional neural networks." International conference on machine learning. PMLR, 2019. +[2] Tan, Mingxing, and Quoc Le. "EfficientNet: Rethinking model scaling for convolutional neural networks." International conference on machine learning. PMLR, 2019. 
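A note on the `probs` to `probabilities` rename in `tensorrt_classifier.hpp` and `tensorrt_classifier.cpp` above: `doInference` is the public entry point, so callers now receive one class index and one confidence per batched image (the `cnn_classifier.cpp` hunk below adopts the new names). A hedged caller sketch, assuming an already-constructed classifier; `classifyBatch` and the thresholding comment are illustrative, not part of the package:

```cpp
#include <tensorrt_classifier/tensorrt_classifier.hpp>  // header touched above

#include <opencv2/core/mat.hpp>

#include <vector>

// Hypothetical helper; namespace qualifiers for TrtClassifier are omitted here.
void classifyBatch(TrtClassifier & classifier, const std::vector<cv::Mat> & image_batch)
{
  std::vector<int> results;          // argmax class index, one per image
  std::vector<float> probabilities;  // confidence of that class, same order
  if (!classifier.doInference(image_batch, results, probabilities)) {
    return;  // engine not initialized or inference failed
  }
  for (size_t i = 0; i < results.size(); ++i) {
    // e.g. keep results[i] only when probabilities[i] exceeds a threshold
  }
}
```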
## (Optional) Future extensions / Unimplemented parts diff --git a/perception/traffic_light_classifier/src/cnn_classifier.cpp b/perception/traffic_light_classifier/src/cnn_classifier.cpp index 323b90ee1ce61..680314d98d794 100644 --- a/perception/traffic_light_classifier/src/cnn_classifier.cpp +++ b/perception/traffic_light_classifier/src/cnn_classifier.cpp @@ -85,14 +85,14 @@ bool CNNClassifier::getTrafficSignals( } } if (static_cast(image_batch.size()) == batch_size_) { - std::vector probs; + std::vector probabilities; std::vector classes; - bool res = classifier_->doInference(image_batch, classes, probs); - if (!res || classes.empty() || probs.empty()) { + bool res = classifier_->doInference(image_batch, classes, probabilities); + if (!res || classes.empty() || probabilities.empty()) { return false; } for (size_t i = 0; i < true_batch_size; i++) { - postProcess(classes[i], probs[i], traffic_signals.signals[signal_i]); + postProcess(classes[i], probabilities[i], traffic_signals.signals[signal_i]); /* debug */ if (0 < image_pub_.getNumSubscribers()) { outputDebugImage(image_batch[i], traffic_signals.signals[signal_i]); diff --git a/perception/traffic_light_classifier/src/single_image_debug_inference_node.cpp b/perception/traffic_light_classifier/src/single_image_debug_inference_node.cpp index 179d63b8b3797..34510d53f9ce3 100644 --- a/perception/traffic_light_classifier/src/single_image_debug_inference_node.cpp +++ b/perception/traffic_light_classifier/src/single_image_debug_inference_node.cpp @@ -111,6 +111,7 @@ class SingleImageDebugInferenceNode : public rclcpp::Node void inferWithCrop(int action, int x, int y, [[maybe_unused]] int flags) { + // cspell: ignore LBUTTONDOWN, LBUTTONUP if (action == cv::EVENT_LBUTTONDOWN) { top_left_corner_ = cv::Point(x, y); } else if (action == cv::EVENT_LBUTTONUP) { diff --git a/perception/traffic_light_fine_detector/include/traffic_light_fine_detector/nodelet.hpp b/perception/traffic_light_fine_detector/include/traffic_light_fine_detector/nodelet.hpp index 4d14874a58242..f2f1f17be6894 100644 --- a/perception/traffic_light_fine_detector/include/traffic_light_fine_detector/nodelet.hpp +++ b/perception/traffic_light_fine_detector/include/traffic_light_fine_detector/nodelet.hpp @@ -100,9 +100,9 @@ class TrafficLightFineDetectorNodelet : public rclcpp::Node * @brief Every traffic light roi might have several possible detections. This function * is designed to select the best detection for every traffic light by making use of * the relative positions between the traffic lights projected on the image (expect/rois). - * To be specified, for every detection, all the expect rois will be transfered so that + * To be specific, for every detection, all the expect rois will be transferred so that * this detection will match the corresponding expect roi. Note that the expect rois - * of other traffic lights will also be transfered equally. Then, for every expect roi, + * of other traffic lights will also be transferred equally. Then, for every expect roi, * it will calculate the match score (which is IoU_detection_roi * detection_confidence) * with every detection.
* The combination of detections that will get highest match score sum will be the selected diff --git a/perception/traffic_light_map_based_detector/src/node.cpp b/perception/traffic_light_map_based_detector/src/node.cpp index 99ec2ddb607ad..fcf13e3900793 100644 --- a/perception/traffic_light_map_based_detector/src/node.cpp +++ b/perception/traffic_light_map_based_detector/src/node.cpp @@ -473,6 +473,7 @@ void MapBasedDetector::getVisibleTrafficLights( } // check within image frame + // cspell: ignore tltl tf2::Vector3 tf_camera2tltl = tf_map2camera.inverse() * getTrafficLightTopLeft(traffic_light); tf2::Vector3 tf_camera2tlbr = tf_map2camera.inverse() * getTrafficLightBottomRight(traffic_light); diff --git a/perception/traffic_light_multi_camera_fusion/README.md b/perception/traffic_light_multi_camera_fusion/README.md index 238c9b6507ad0..2f225f1540f63 100644 --- a/perception/traffic_light_multi_camera_fusion/README.md +++ b/perception/traffic_light_multi_camera_fusion/README.md @@ -5,7 +5,7 @@ `traffic_light_multi_camera_fusion` performs traffic light signal fusion which can be summarized as the following two tasks: 1. Multi-Camera-Fusion: performed on single traffic light signal detected by different cameras. -2. Group-Fusion: performed on traffic light signals within the same group, which means traffic lights sharing the same regulatory element id defined in lanetlet2 map. +2. Group-Fusion: performed on traffic light signals within the same group, which means traffic lights sharing the same regulatory element id defined in lanelet2 map. ## Input topics diff --git a/perception/traffic_light_multi_camera_fusion/include/traffic_light_multi_camera_fusion/node.hpp b/perception/traffic_light_multi_camera_fusion/include/traffic_light_multi_camera_fusion/node.hpp index bc63f34b191e7..4cd5569129a00 100644 --- a/perception/traffic_light_multi_camera_fusion/include/traffic_light_multi_camera_fusion/node.hpp +++ b/perception/traffic_light_multi_camera_fusion/include/traffic_light_multi_camera_fusion/node.hpp @@ -87,31 +87,31 @@ class MultiCameraFusion : public rclcpp::Node void mapCallback(const autoware_auto_mapping_msgs::msg::HADMapBin::ConstSharedPtr input_msg); - void multiCameraFusion(std::map & fusioned_record_map); + void multiCameraFusion(std::map & fused_record_map); void convertOutputMsg( const std::map & grouped_record_map, NewSignalArrayType & msg_out); void groupFusion( - std::map & fusioned_record_map, + std::map & fused_record_map, std::map & grouped_record_map); typedef mf::sync_policies::ExactTime ExactSyncPolicy; typedef mf::Synchronizer ExactSync; typedef mf::sync_policies::ApproximateTime - ApproSyncPolicy; - typedef mf::Synchronizer ApproSync; + ApproximateSyncPolicy; + typedef mf::Synchronizer ApproximateSync; std::vector>> signal_subs_; std::vector>> roi_subs_; std::vector>> cam_info_subs_; std::vector> exact_sync_subs_; - std::vector> appro_sync_subs_; + std::vector> approximate_sync_subs_; rclcpp::Subscription::SharedPtr map_sub_; rclcpp::Publisher::SharedPtr signal_pub_; /* - the mappping from traffic light id (instance id) to regulatory element id (group id) + the mapping from traffic light id (instance id) to regulatory element id (group id) */ std::map traffic_light_id_to_regulatory_ele_id_; /* diff --git a/perception/traffic_light_multi_camera_fusion/src/node.cpp b/perception/traffic_light_multi_camera_fusion/src/node.cpp index 892b6be62ef9d..387b3ef1f6758 100644 --- a/perception/traffic_light_multi_camera_fusion/src/node.cpp +++ 
diff --git a/perception/traffic_light_multi_camera_fusion/src/node.cpp b/perception/traffic_light_multi_camera_fusion/src/node.cpp
index 892b6be62ef9d..387b3ef1f6758 100644
--- a/perception/traffic_light_multi_camera_fusion/src/node.cpp
+++ b/perception/traffic_light_multi_camera_fusion/src/node.cpp
@@ -165,10 +165,10 @@ MultiCameraFusion::MultiCameraFusion(const rclcpp::NodeOptions & node_options)
     exact_sync_subs_.back()->registerCallback(
       std::bind(&MultiCameraFusion::trafficSignalRoiCallback, this, _1, _2, _3));
   } else {
-    appro_sync_subs_.emplace_back(new ApproSync(
-      ApproSyncPolicy(10), *(cam_info_subs_.back()), *(roi_subs_.back()),
+    approximate_sync_subs_.emplace_back(new ApproximateSync(
+      ApproximateSyncPolicy(10), *(cam_info_subs_.back()), *(roi_subs_.back()),
       *(signal_subs_.back())));
-    appro_sync_subs_.back()->registerCallback(
+    approximate_sync_subs_.back()->registerCallback(
       std::bind(&MultiCameraFusion::trafficSignalRoiCallback, this, _1, _2, _3));
   }
 }
@@ -186,14 +186,14 @@ void MultiCameraFusion::trafficSignalRoiCallback(
   rclcpp::Time stamp(roi_msg->header.stamp);
   /*
   Insert the received record array to the table.
-  Attention should be paied that this record array might not have the newest timestamp
+  Note that this record array might not have the newest timestamp
   */
   record_arr_set_.insert(
     FusionRecordArr{cam_info_msg->header, *cam_info_msg, *roi_msg, *signal_msg});

-  std::map<IdType, FusionRecord> fusioned_record_map, grouped_record_map;
-  multiCameraFusion(fusioned_record_map);
-  groupFusion(fusioned_record_map, grouped_record_map);
+  std::map<IdType, FusionRecord> fused_record_map, grouped_record_map;
+  multiCameraFusion(fused_record_map);
+  groupFusion(fused_record_map, grouped_record_map);
   NewSignalArrayType msg_out;
   convertOutputMsg(grouped_record_map, msg_out);
@@ -237,9 +237,9 @@ void MultiCameraFusion::convertOutputMsg(
   }
 }

-void MultiCameraFusion::multiCameraFusion(std::map<IdType, FusionRecord> & fusioned_record_map)
+void MultiCameraFusion::multiCameraFusion(std::map<IdType, FusionRecord> & fused_record_map)
 {
-  fusioned_record_map.clear();
+  fused_record_map.clear();
   /*
   this should not happen. Just in case
   */
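The callback comment above warns that record arrays can arrive out of order. A common way to tolerate that, and plausibly what `record_arr_set_` does, is an ordered set keyed on the header stamp so iteration always starts from the oldest record. The `RecordArr` struct and comparator below are assumptions sketching that idea, not the node's actual definitions.

```cpp
// Sketch: out-of-order arrivals kept sorted by stamp in a std::set,
// so the fusion loop can consume and prune from the oldest side.
#include <set>

#include <rclcpp/time.hpp>
#include <std_msgs/msg/header.hpp>

struct RecordArr
{
  std_msgs::msg::Header header;
  // ... camera info, rois, and signals would live here in the real node.
  bool operator<(const RecordArr & other) const
  {
    return rclcpp::Time(header.stamp) < rclcpp::Time(other.header.stamp);
  }
};

int main()
{
  std::set<RecordArr> record_arr_set;

  RecordArr late, early;
  late.header.stamp.sec = 10;
  early.header.stamp.sec = 9;

  // Insertion order does not matter; iteration always yields oldest first.
  record_arr_set.insert(late);
  record_arr_set.insert(early);
  // *record_arr_set.begin() is now `early`, regardless of arrival order.
  return 0;
}
```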
@@ -259,7 +259,7 @@ void MultiCameraFusion::multiCameraFusion(std::map<IdType, FusionRecord> & fusio
       it = record_arr_set_.erase(it);
     } else {
       /*
-      generate fusioned record result with the saved records
+      generate fused record result with the saved records
       */
       const FusionRecordArr & record_arr = *it;
       for (size_t i = 0; i < record_arr.rois.rois.size(); i++) {
@@ -279,9 +279,9 @@
         update it
         */
         if (
-          fusioned_record_map.find(roi.traffic_light_id) == fusioned_record_map.end() ||
-          ::compareRecord(record, fusioned_record_map[roi.traffic_light_id]) >= 0) {
-          fusioned_record_map[roi.traffic_light_id] = record;
+          fused_record_map.find(roi.traffic_light_id) == fused_record_map.end() ||
+          ::compareRecord(record, fused_record_map[roi.traffic_light_id]) >= 0) {
+          fused_record_map[roi.traffic_light_id] = record;
         }
       }
       it++;
@@ -290,11 +290,11 @@
 }

 void MultiCameraFusion::groupFusion(
-  std::map<IdType, FusionRecord> & fusioned_record_map,
+  std::map<IdType, FusionRecord> & fused_record_map,
   std::map<IdType, FusionRecord> & grouped_record_map)
 {
   grouped_record_map.clear();
-  for (auto & p : fusioned_record_map) {
+  for (auto & p : fused_record_map) {
     IdType roi_id = p.second.roi.traffic_light_id;
     /*
     this should not happen
diff --git a/perception/traffic_light_occlusion_predictor/include/traffic_light_occlusion_predictor/occlusion_predictor.hpp b/perception/traffic_light_occlusion_predictor/include/traffic_light_occlusion_predictor/occlusion_predictor.hpp
index 9c14cd992fac8..e37083f81decd 100644
--- a/perception/traffic_light_occlusion_predictor/include/traffic_light_occlusion_predictor/occlusion_predictor.hpp
+++ b/perception/traffic_light_occlusion_predictor/include/traffic_light_occlusion_predictor/occlusion_predictor.hpp
@@ -77,7 +77,7 @@ class CloudOcclusionPredictor
     uint32_t horizontal_sample_num, uint32_t vertical_sample_num,
     pcl::PointCloud<pcl::PointXYZ> & cloud_out);

-  void calcRoiVectex3D(
+  void calcRoiVector3D(
     const tier4_perception_msgs::msg::TrafficLightRoi & roi,
     const image_geometry::PinholeCameraModel & pinhole_model,
     const std::map<lanelet::Id, tf2::Vector3> & traffic_light_position_map,
diff --git a/perception/traffic_light_occlusion_predictor/src/occlusion_predictor.cpp b/perception/traffic_light_occlusion_predictor/src/occlusion_predictor.cpp
index eff8921a897a3..7daa0849abe5e 100644
--- a/perception/traffic_light_occlusion_predictor/src/occlusion_predictor.cpp
+++ b/perception/traffic_light_occlusion_predictor/src/occlusion_predictor.cpp
@@ -88,7 +88,7 @@ void CloudOcclusionPredictor::predict(
   image_geometry::PinholeCameraModel pinhole_model;
   pinhole_model.fromCameraInfo(*camera_info_msg);
   for (size_t i = 0; i < rois_msg->rois.size(); i++) {
-    calcRoiVectex3D(
+    calcRoiVector3D(
       rois_msg->rois[i], pinhole_model, traffic_light_position_map, tf_camera2map, roi_tls[i],
       roi_brs[i]);
   }
@@ -111,7 +111,7 @@ void CloudOcclusionPredictor::predict(
   }
 }

-void CloudOcclusionPredictor::calcRoiVectex3D(
+void CloudOcclusionPredictor::calcRoiVector3D(
   const tier4_perception_msgs::msg::TrafficLightRoi & roi,
   const image_geometry::PinholeCameraModel & pinhole_model,
   const std::map<lanelet::Id, tf2::Vector3> & traffic_light_position_map,
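The renamed `calcRoiVector3D` lifts the 2D roi corners into 3D using the pinhole camera model. As a rough illustration of that step, the hypothetical helper below projects one pixel to a ray and scales it to a given depth; the function name and the `distance_to_light` parameter (interpreted as z-depth in the camera optical frame) are assumptions, not the node's actual signature.

```cpp
// Sketch: pixel -> 3D point at a known depth, via image_geometry's
// pinhole model, similar in spirit to what calcRoiVector3D must do.
#include <image_geometry/pinhole_camera_model.h>
#include <opencv2/core.hpp>

cv::Point3d pixelTo3D(
  const image_geometry::PinholeCameraModel & model, const cv::Point2d & pixel,
  double distance_to_light)
{
  // Ray through the pixel, in the camera optical frame (z forward).
  const cv::Point3d ray = model.projectPixelTo3dRay(pixel);
  // Scale the ray so the resulting point sits at the light's z-depth.
  const double scale = distance_to_light / ray.z;
  return {ray.x * scale, ray.y * scale, ray.z * scale};
}
```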
diff --git a/perception/traffic_light_ssd_fine_detector/lib/include/trt_plugin_helper.hpp b/perception/traffic_light_ssd_fine_detector/lib/include/trt_plugin_helper.hpp
index 7ca28498ed12d..b7cf516c3c3b9 100644
--- a/perception/traffic_light_ssd_fine_detector/lib/include/trt_plugin_helper.hpp
+++ b/perception/traffic_light_ssd_fine_detector/lib/include/trt_plugin_helper.hpp
@@ -41,6 +41,7 @@ enum pluginStatus_t {
   } \
 }

+// cspell: ignore CUASSERT
 #define CUASSERT(status_) \
 { \
   auto s_ = status_; \
@@ -50,6 +51,7 @@ enum pluginStatus_t {
   } \
 }

+// cspell: ignore CUBLASASSERT
 #define CUBLASASSERT(status_) \
 { \
   auto s_ = status_; \
@@ -58,6 +60,7 @@ enum pluginStatus_t {
   } \
 }

+// cspell: ignore CUERRORMSG
 #define CUERRORMSG(status_) \
 { \
   auto s_ = status_; \
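These check macros capture the status expression once into a local `s_` inside an extra brace scope, so a call with side effects is evaluated exactly once even though the macro body inspects the result afterwards. The snippet below is a hypothetical usage sketch; the include path is an assumption, and `CUASSERT` here only logs (it does not abort), per the macro body shown in the diff.

```cpp
// Hypothetical usage of the CUASSERT-style macros defined in
// trt_plugin_helper.hpp (include path assumed).
#include "trt_plugin_helper.hpp"

#include <cuda_runtime.h>

#include <cstdio>

int main()
{
  float host[4] = {0.f, 1.f, 2.f, 3.f};
  float * device = nullptr;

  // Each wrapped call yields a cudaError_t that the macro checks and logs;
  // the side-effecting call itself runs exactly once.
  CUASSERT(cudaMalloc(&device, sizeof(host)));
  CUASSERT(cudaMemcpy(device, host, sizeof(host), cudaMemcpyHostToDevice));
  CUASSERT(cudaFree(device));

  std::printf("done\n");
  return 0;
}
```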