diff --git a/.cspell-partial.json b/.cspell-partial.json
index f45ae8961c56b..13849ef298019 100644
--- a/.cspell-partial.json
+++ b/.cspell-partial.json
@@ -5,5 +5,5 @@
     "perception/bytetrack/lib/**"
   ],
   "ignoreRegExpList": [],
-  "words": []
+  "words": ["dltype", "tvmgen"]
 }
diff --git a/common/tvm_utility/.gitignore b/common/tvm_utility/.gitignore
index a09bb7234b379..e69de29bb2d1d 100644
--- a/common/tvm_utility/.gitignore
+++ b/common/tvm_utility/.gitignore
@@ -1,2 +0,0 @@
-artifacts/**/*.jpg
-data/
diff --git a/common/tvm_utility/CMakeLists.txt b/common/tvm_utility/CMakeLists.txt
index b49f141d9e2e4..c0a0d7385f615 100644
--- a/common/tvm_utility/CMakeLists.txt
+++ b/common/tvm_utility/CMakeLists.txt
@@ -29,15 +29,19 @@ set(TVM_UTILITY_NODE_LIB_HEADERS
 ament_auto_add_library(${PROJECT_NAME} SHARED ${TVM_UTILITY_NODE_LIB_HEADERS})
 set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE CXX)
 
-if(BUILD_TESTING)
+set(BUILD_EXAMPLE OFF CACHE BOOL "enable building the yolo_v2_tiny example")
+
+if(BUILD_TESTING OR BUILD_EXAMPLE)
+  find_package(OpenCV REQUIRED)
+  set(tvm_runtime_DIR ${tvm_vendor_DIR})
+  find_package(tvm_runtime CONFIG REQUIRED)
   # Get target backend
   set(${PROJECT_NAME}_BACKEND llvm CACHE STRING "${PROJECT_NAME} neural network backend")
+endif()
 
+if(BUILD_TESTING)
   # compile each folder inside test/ as a test case
   find_package(ament_cmake_gtest REQUIRED)
-  find_package(OpenCV REQUIRED)
-  set(tvm_runtime_DIR ${tvm_vendor_DIR})
-  find_package(tvm_runtime CONFIG REQUIRED)
 
   set(TEST_ARTIFACTS "${CMAKE_CURRENT_LIST_DIR}/artifacts")
   file(GLOB TEST_CASES test/*)
@@ -47,17 +51,11 @@ if(BUILD_TESTING)
     endif()
     # the folder name becomes the test case name
     file(RELATIVE_PATH TEST_CASE_NAME ${CMAKE_CURRENT_LIST_DIR}/test ${TEST_FOLDER})
-    # Get neural network.
     set(NN_DEPENDENCY "")
-    get_neural_network(${TEST_CASE_NAME} ${${PROJECT_NAME}_BACKEND} NN_DEPENDENCY)
+    get_neural_network(${TEST_CASE_NAME}_${CMAKE_SYSTEM_PROCESSOR} ${${PROJECT_NAME}_BACKEND} NN_DEPENDENCY)
 
     if(NOT NN_DEPENDENCY STREQUAL "")
-      if(TEST_CASE_NAME STREQUAL "yolo_v2_tiny" AND
-        NOT EXISTS ${TEST_ARTIFACTS}/yolo_v2_tiny/test_image_0.jpg)
-        message(WARNING "Missing image artifact for yolo_v2_tiny, skipping test")
-        continue()
-      endif()
       # add all cpp files in the folder to the target
       file(GLOB TEST_CASE_SOURCES ${TEST_FOLDER}/*.cpp)
       ament_add_gtest(${TEST_CASE_NAME} ${TEST_CASE_SOURCES})
@@ -75,7 +73,7 @@ if(BUILD_TESTING)
     target_include_directories("${TEST_CASE_NAME}" SYSTEM PUBLIC
       "${OpenCV_INCLUDE_DIRS}"
       "${tvm_utility_FOUND_INCLUDE_DIRS}"
-      "data/models"
+      "data/models/${TEST_CASE_NAME}_${CMAKE_SYSTEM_PROCESSOR}"
       "include"
     )
 
@@ -93,5 +91,61 @@ if(BUILD_TESTING)
   endforeach()
 endif()
 
+if(BUILD_EXAMPLE)
+  # compile each folder inside example/ as an example
+  find_package(rclcpp REQUIRED)
+
+  set(EXAMPLE_ARTIFACTS "${CMAKE_CURRENT_LIST_DIR}/artifacts")
+  file(GLOB EXAMPLE_CASES example/*)
+  foreach(EXAMPLE_FOLDER ${EXAMPLE_CASES})
+    if(NOT IS_DIRECTORY ${EXAMPLE_FOLDER})
+      continue()
+    endif()
+    # the folder name becomes the example name
+    file(RELATIVE_PATH EXAMPLE_NAME ${CMAKE_CURRENT_LIST_DIR}/example ${EXAMPLE_FOLDER})
+    # Get neural network.
+    set(NN_DEPENDENCY "")
+    get_neural_network(${EXAMPLE_NAME} ${${PROJECT_NAME}_BACKEND} NN_DEPENDENCY)
+
+    if(NOT NN_DEPENDENCY STREQUAL "")
+      if(EXAMPLE_NAME STREQUAL "yolo_v2_tiny" AND
+        NOT EXISTS ${EXAMPLE_ARTIFACTS}/yolo_v2_tiny/test_image_0.jpg)
+        message(WARNING "Missing image artifact for yolo_v2_tiny, skipping example")
+        continue()
+      endif()
+      # add all cpp files in the folder to the target
+      file(GLOB EXAMPLE_SOURCES ${EXAMPLE_FOLDER}/*.cpp)
+      ament_auto_add_executable(${EXAMPLE_NAME} ${EXAMPLE_SOURCES})
+      ament_target_dependencies(${EXAMPLE_NAME}
+        "ament_index_cpp"
+        "tvm_vendor"
+        "rclcpp"
+      )
+      add_dependencies(${EXAMPLE_NAME} ${NN_DEPENDENCY})
+
+      target_link_libraries("${EXAMPLE_NAME}"
+        "${OpenCV_LIBRARIES}"
+        "${tvm_runtime_LIBRARIES}"
+      )
+
+      target_include_directories("${EXAMPLE_NAME}" SYSTEM PUBLIC
+        "${OpenCV_INCLUDE_DIRS}"
+        "${tvm_utility_FOUND_INCLUDE_DIRS}"
+        "data/models"
+        "include"
+      )
+
+    else()
+      message(WARNING "No model is provided for the ${EXAMPLE_FOLDER} example")
+    endif()
+
+  endforeach()
+endif()
+
 list(APPEND ${PROJECT_NAME}_CONFIG_EXTRAS "${PROJECT_NAME}-extras.cmake")
-ament_auto_package()
+ament_auto_package(
+  INSTALL_TO_SHARE
+    launch
+    config
+    artifacts)
diff --git a/common/tvm_utility/README.md b/common/tvm_utility/README.md
index 7d4874d5ed89a..4751428353886 100644
--- a/common/tvm_utility/README.md
+++ b/common/tvm_utility/README.md
@@ -41,7 +41,7 @@ The earliest supported version depends on each package making use of the inferen
 
 #### Models
 
-Dependent packages are expected to use the `get_neural_network` cmake function from this package in order to get the compiled TVM models.
+Dependent packages are expected to use the `get_neural_network` cmake function from this package in order to set up the model files as an external dependency of their build.
 
 ### Error detection and handling
 
@@ -50,76 +50,55 @@ error description.
 
 ### Neural Networks Provider
 
-This package also provides a utility to get pre-compiled neural networks to packages using them for their inference.
-
 The neural networks are compiled as part of the [Model Zoo](https://github.com/autowarefoundation/modelzoo/) CI pipeline and saved to an S3 bucket.
 
-This package exports cmake variables and functions for ease of access to those neural networks.
 The `get_neural_network` function creates an abstraction for the artifact management.
-The artifacts are saved under the source directory of the package making use of the function; under "data/".
-Priority is given to user-provided files, under "data/user/${MODEL_NAME}/".
-If there are no user-provided files, the function tries to reuse previously-downloaded artifacts.
-If there are no previously-downloaded artifacts, and if the `DOWNLOAD_ARTIFACTS` cmake variable is set, they will be downloaded from the bucket.
-Otherwise, nothing happens.
+Users should check that the model configuration header file is present under "data/models/${MODEL_NAME}/"; otherwise no dependency is created and compilation of the targets using the model is skipped.
 
 The structure inside the source directory of the package making use of the function is as follows:
 
 ```{text}
 .
 ├── data
-│   ├── downloads
-│   │   ├── ${MODEL 1}-${ARCH 1}-{BACKEND 1}-{VERSION 1}.tar.gz
-│   │   ├── ...
-│   │   └── ${MODEL ...}-${ARCH ...}-{BACKEND ...}-{VERSION ...}.tar.gz
-│   ├── models
-│   │   ├── ${MODEL 1}
-│   │   │   ├── ...
-│   │   │   └── inference_engine_tvm_config.hpp
-│   │   ├── ...
-│   │   └── ${MODEL ...}
-│   │       └── ...
-│   └── user
+│   └── models
 │       ├── ${MODEL 1}
-│       │   ├── deploy_graph.json
-│       │   ├── deploy_lib.so
-│       │   ├── deploy_param.params
 │       │   └── inference_engine_tvm_config.hpp
 │       ├── ...
 │       └── ${MODEL ...}
 │           └── ...
 ```
 
-The `inference_engine_tvm_config.hpp` file needed for compilation by dependent packages is made available under "data/models/${MODEL_NAME}/inference_engine_tvm_config.hpp".
+The `inference_engine_tvm_config.hpp` file needed for compilation by dependent packages should be available under "data/models/${MODEL_NAME}/inference_engine_tvm_config.hpp".
 Dependent packages can use the cmake `add_dependencies` function with the name provided in the `DEPENDENCY` output parameter of `get_neural_network` to ensure this file is created before it gets used.
 The other `deploy_*` files are installed to "models/${MODEL_NAME}/" under the `share` directory of the package.
 
-The target version to be downloaded can be overwritten by setting the `MODELZOO_VERSION` cmake variable.
-
-#### Assumptions / Known limits
-
-If several packages make use of the same neural network, it will be downloaded once per package.
-
-In case a requested artifact doesn't exist in the S3 bucket, the error message from ExternalProject is not explicit enough for the user to understand what went wrong.
-
-In case the user manually sets `MODELZOO_VERSION` to "latest", the archive will not be re-downloaded when it gets updated in the S3 bucket (it is not a problem for tagged versions as they are not expected to be updated).
+The other model files should be stored in the `autoware_data` folder, under a folder named after the package using them, with the following structure:
+
+```{text}
+$HOME/autoware_data
+└── ${package}
+    └── models
+        ├── ${MODEL 1}
+        │   ├── deploy_graph.json
+        │   ├── deploy_lib.so
+        │   └── deploy_param.params
+        ├── ...
+        └── ${MODEL ...}
+            └── ...
+```
 
 #### Inputs / Outputs
 
-Inputs:
-
-- `DOWNLOAD_ARTIFACTS` cmake variable; needs to be set to enable downloading the artifacts
-- `MODELZOO_VERSION` cmake variable; can be used to overwrite the default target version of downloads
-
 Outputs:
 
-- `get_neural_network` cmake function; can be used to get a neural network compiled for a specific backend
+- `get_neural_network` cmake function; creates the external dependency for a package, using the model files provided by the user
 
 In/Out:
 
 - The `DEPENDENCY` argument of `get_neural_network` can be checked for the outcome of the function.
-  It is an empty string when the neural network couldn't be made available.
+  It is an empty string when the neural network wasn't provided by the user.
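+
+For illustration, a dependent package can construct the inference engine from the generated configuration header roughly as follows. This is a minimal sketch only: it uses the `abs_model` configuration shipped with this package as a stand-in, and the data path is a placeholder.
+
+```cpp
+// Sketch: consuming the generated inference_engine_tvm_config.hpp from a dependent package.
+// Assumes get_neural_network() put "data/models/${MODEL_NAME}/" on the include path.
+#include "tvm_utility/pipeline.hpp"
+
+#include <inference_engine_tvm_config.hpp>
+
+#include <cstdlib>
+#include <string>
+
+using model_zoo::inf_test::engine_load::abs_model::config;
+
+int main()
+{
+  // With a non-empty data path, the engine loads deploy_lib.so, deploy_graph.json and
+  // deploy_param.params from <data_path>/<package>/models/<network_name>/.
+  const std::string data_path = std::string(std::getenv("HOME")) + "/autoware_data";
+  tvm_utility::pipeline::InferenceEngineTVM engine{config, "tvm_utility", data_path};
+  return 0;
+}
+```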
 ## Security considerations
 
diff --git a/common/tvm_utility/config/yolo_v2_tiny_example.param.yaml b/common/tvm_utility/config/yolo_v2_tiny_example.param.yaml
new file mode 100644
index 0000000000000..b63e4a99f97f2
--- /dev/null
+++ b/common/tvm_utility/config/yolo_v2_tiny_example.param.yaml
@@ -0,0 +1,6 @@
+/**:
+  ros__parameters:
+    image_filename: $(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/test_image_0.jpg
+    label_filename: $(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/labels.txt
+    anchor_filename: $(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/anchors.csv
+    data_path: $(env HOME)/autoware_data
diff --git a/common/tvm_utility/data/models/abs_model_aarch64/deploy_graph.json b/common/tvm_utility/data/models/abs_model_aarch64/deploy_graph.json
new file mode 100644
index 0000000000000..b226c01747dca
--- /dev/null
+++ b/common/tvm_utility/data/models/abs_model_aarch64/deploy_graph.json
@@ -0,0 +1,36 @@
+{
+  "nodes": [
+    {
+      "op": "null",
+      "name": "a",
+      "inputs": []
+    },
+    {
+      "op": "tvm_op",
+      "name": "tvmgen_default_fused_abs",
+      "attrs": {
+        "num_outputs": "1",
+        "num_inputs": "1",
+        "flatten_data": "0",
+        "func_name": "tvmgen_default_fused_abs",
+        "hash": "1be44995aa501758"
+      },
+      "inputs": [[0, 0, 0]]
+    }
+  ],
+  "arg_nodes": [0],
+  "heads": [[1, 0, 0]],
+  "attrs": {
+    "dltype": ["list_str", ["float32", "float32"]],
+    "device_index": ["list_int", [1, 1]],
+    "storage_id": ["list_int", [0, 1]],
+    "shape": [
+      "list_shape",
+      [
+        [2, 2],
+        [2, 2]
+      ]
+    ]
+  },
+  "node_row_ptr": [0, 1, 2]
+}
diff --git a/common/tvm_utility/data/models/abs_model_aarch64/deploy_lib.so b/common/tvm_utility/data/models/abs_model_aarch64/deploy_lib.so
new file mode 100755
index 0000000000000..e1ad7cebad734
Binary files /dev/null and b/common/tvm_utility/data/models/abs_model_aarch64/deploy_lib.so differ
diff --git a/common/tvm_utility/data/models/abs_model_aarch64/deploy_param.params b/common/tvm_utility/data/models/abs_model_aarch64/deploy_param.params
new file mode 100644
index 0000000000000..1011def01ed82
Binary files /dev/null and b/common/tvm_utility/data/models/abs_model_aarch64/deploy_param.params differ
diff --git a/common/tvm_utility/data/models/abs_model_aarch64/inference_engine_tvm_config.hpp b/common/tvm_utility/data/models/abs_model_aarch64/inference_engine_tvm_config.hpp
new file mode 100644
index 0000000000000..09c8c0beacebe
--- /dev/null
+++ b/common/tvm_utility/data/models/abs_model_aarch64/inference_engine_tvm_config.hpp
@@ -0,0 +1,54 @@
+// Copyright 2021 Arm Limited and Contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
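+//
+// TVM configuration for a minimal 2x2 element-wise abs test model, cross-compiled
+// for aarch64; it lists the artifact file names and the input/output tensor specs.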
+
+#include "tvm_utility/pipeline.hpp"
+
+#ifndef COMMON__TVM_UTILITY__DATA__MODELS__ABS_MODEL_AARCH64__INFERENCE_ENGINE_TVM_CONFIG_HPP_  // NOLINT
+#define COMMON__TVM_UTILITY__DATA__MODELS__ABS_MODEL_AARCH64__INFERENCE_ENGINE_TVM_CONFIG_HPP_
+
+namespace model_zoo
+{
+namespace inf_test
+{
+namespace engine_load
+{
+namespace abs_model
+{
+
+static const tvm_utility::pipeline::InferenceEngineTVMConfig config{
+  {0, 0, 0},  // modelzoo_version
+
+  // cspell: ignore mtriple
+  "abs_model_aarch64",  // network_name
+  "llvm -mtriple=aarch64-linux-gnu",  // network_backend
+
+  "deploy_lib.so",  // network_module_path
+  "deploy_graph.json",  // network_graph_path
+  "deploy_param.params",  // network_params_path
+
+  // cspell: ignore DLCPU
+  kDLCPU,  // tvm_device_type
+  0,  // tvm_device_id
+
+  {{"a", kDLFloat, 32, 1, {2, 2}}},  // network_inputs
+
+  {{"output", kDLFloat, 32, 1, {2, 2}}}  // network_outputs
+};
+
+}  // namespace abs_model
+}  // namespace engine_load
+}  // namespace inf_test
+}  // namespace model_zoo
+#endif  // COMMON__TVM_UTILITY__DATA__MODELS__ABS_MODEL_AARCH64__INFERENCE_ENGINE_TVM_CONFIG_HPP_
+        // NOLINT
diff --git a/common/tvm_utility/data/models/abs_model_x86_64/deploy_graph.json b/common/tvm_utility/data/models/abs_model_x86_64/deploy_graph.json
new file mode 100644
index 0000000000000..b226c01747dca
--- /dev/null
+++ b/common/tvm_utility/data/models/abs_model_x86_64/deploy_graph.json
@@ -0,0 +1,36 @@
+{
+  "nodes": [
+    {
+      "op": "null",
+      "name": "a",
+      "inputs": []
+    },
+    {
+      "op": "tvm_op",
+      "name": "tvmgen_default_fused_abs",
+      "attrs": {
+        "num_outputs": "1",
+        "num_inputs": "1",
+        "flatten_data": "0",
+        "func_name": "tvmgen_default_fused_abs",
+        "hash": "1be44995aa501758"
+      },
+      "inputs": [[0, 0, 0]]
+    }
+  ],
+  "arg_nodes": [0],
+  "heads": [[1, 0, 0]],
+  "attrs": {
+    "dltype": ["list_str", ["float32", "float32"]],
+    "device_index": ["list_int", [1, 1]],
+    "storage_id": ["list_int", [0, 1]],
+    "shape": [
+      "list_shape",
+      [
+        [2, 2],
+        [2, 2]
+      ]
+    ]
+  },
+  "node_row_ptr": [0, 1, 2]
+}
diff --git a/common/tvm_utility/data/models/abs_model_x86_64/deploy_lib.so b/common/tvm_utility/data/models/abs_model_x86_64/deploy_lib.so
new file mode 100644
index 0000000000000..9a6d02817e048
Binary files /dev/null and b/common/tvm_utility/data/models/abs_model_x86_64/deploy_lib.so differ
diff --git a/common/tvm_utility/data/models/abs_model_x86_64/deploy_param.params b/common/tvm_utility/data/models/abs_model_x86_64/deploy_param.params
new file mode 100644
index 0000000000000..1011def01ed82
Binary files /dev/null and b/common/tvm_utility/data/models/abs_model_x86_64/deploy_param.params differ
diff --git a/common/tvm_utility/data/models/abs_model_x86_64/inference_engine_tvm_config.hpp b/common/tvm_utility/data/models/abs_model_x86_64/inference_engine_tvm_config.hpp
new file mode 100644
index 0000000000000..7a7e3ef97c1b3
--- /dev/null
+++ b/common/tvm_utility/data/models/abs_model_x86_64/inference_engine_tvm_config.hpp
@@ -0,0 +1,53 @@
+// Copyright 2021 Arm Limited and Contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "tvm_utility/pipeline.hpp"
+
+#ifndef COMMON__TVM_UTILITY__DATA__MODELS__ABS_MODEL_X86_64__INFERENCE_ENGINE_TVM_CONFIG_HPP_  // NOLINT
+#define COMMON__TVM_UTILITY__DATA__MODELS__ABS_MODEL_X86_64__INFERENCE_ENGINE_TVM_CONFIG_HPP_
+
+namespace model_zoo
+{
+namespace inf_test
+{
+namespace engine_load
+{
+namespace abs_model
+{
+
+static const tvm_utility::pipeline::InferenceEngineTVMConfig config{
+  {0, 0, 0},  // modelzoo_version
+
+  "abs_model_x86_64",  // network_name
+  "llvm",  // network_backend
+
+  "deploy_lib.so",  // network_module_path
+  "deploy_graph.json",  // network_graph_path
+  "deploy_param.params",  // network_params_path
+
+  // cspell: ignore DLCPU
+  kDLCPU,  // tvm_device_type
+  0,  // tvm_device_id
+
+  {{"a", kDLFloat, 32, 1, {2, 2}}},  // network_inputs
+
+  {{"output", kDLFloat, 32, 1, {2, 2}}}  // network_outputs
+};
+
+}  // namespace abs_model
+}  // namespace engine_load
+}  // namespace inf_test
+}  // namespace model_zoo
+#endif  // COMMON__TVM_UTILITY__DATA__MODELS__ABS_MODEL_X86_64__INFERENCE_ENGINE_TVM_CONFIG_HPP_
+        // NOLINT
diff --git a/common/tvm_utility/data/models/yolo_v2_tiny/inference_engine_tvm_config.hpp b/common/tvm_utility/data/models/yolo_v2_tiny/inference_engine_tvm_config.hpp
new file mode 100644
index 0000000000000..45ff0d8ce33e3
--- /dev/null
+++ b/common/tvm_utility/data/models/yolo_v2_tiny/inference_engine_tvm_config.hpp
@@ -0,0 +1,56 @@
+// Copyright 2021 Arm Limited and Contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
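+//
+// TVM configuration for the yolo_v2_tiny example model: CPU (llvm) backend, a
+// 416x416x3 input with dynamic batch size and a 13x13x425 output tensor.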
+
+#include "tvm_utility/pipeline.hpp"
+
+#ifndef COMMON__TVM_UTILITY__DATA__MODELS__YOLO_V2_TINY__INFERENCE_ENGINE_TVM_CONFIG_HPP_  // NOLINT
+#define COMMON__TVM_UTILITY__DATA__MODELS__YOLO_V2_TINY__INFERENCE_ENGINE_TVM_CONFIG_HPP_
+
+namespace model_zoo
+{
+namespace perception
+{
+namespace camera_obstacle_detection
+{
+namespace yolo_v2_tiny
+{
+namespace tensorflow_fp32_coco
+{
+
+static const tvm_utility::pipeline::InferenceEngineTVMConfig config{
+  {3, 0, 0},  // modelzoo_version
+
+  "yolo_v2_tiny",  // network_name
+  "llvm",  // network_backend
+
+  // cspell: ignore DLCPU
+  "./deploy_lib.so",  // network_module_path
+  "./deploy_graph.json",  // network_graph_path
+  "./deploy_param.params",  // network_params_path
+
+  kDLCPU,  // tvm_device_type
+  0,  // tvm_device_id
+
+  {{"input", kDLFloat, 32, 1, {-1, 416, 416, 3}}},  // network_inputs
+
+  {{"output", kDLFloat, 32, 1, {1, 13, 13, 425}}}  // network_outputs
+};
+
+}  // namespace tensorflow_fp32_coco
+}  // namespace yolo_v2_tiny
+}  // namespace camera_obstacle_detection
+}  // namespace perception
+}  // namespace model_zoo
+#endif  // COMMON__TVM_UTILITY__DATA__MODELS__YOLO_V2_TINY__INFERENCE_ENGINE_TVM_CONFIG_HPP_
+        // NOLINT
diff --git a/common/tvm_utility/test/yolo_v2_tiny/main.cpp b/common/tvm_utility/example/yolo_v2_tiny/main.cpp
similarity index 80%
rename from common/tvm_utility/test/yolo_v2_tiny/main.cpp
rename to common/tvm_utility/example/yolo_v2_tiny/main.cpp
index aac7900f423c2..8a89436170894 100644
--- a/common/tvm_utility/test/yolo_v2_tiny/main.cpp
+++ b/common/tvm_utility/example/yolo_v2_tiny/main.cpp
@@ -12,30 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "gtest/gtest.h"
 #include "tvm_utility/pipeline.hpp"
 #include "yolo_v2_tiny/inference_engine_tvm_config.hpp"
 
 #include <opencv2/opencv.hpp>
+#include <rclcpp/rclcpp.hpp>
 
 #include <algorithm>
+#include <cmath>
+#include <fstream>
 #include <string>
 #include <utility>
 #include <vector>
 
 using model_zoo::perception::camera_obstacle_detection::yolo_v2_tiny::tensorflow_fp32_coco::config;
 
-// Name of file containing the human readable names of the classes. One class
-// on each line.
-static constexpr const char * LABEL_FILENAME = "./yolo_v2_tiny_artifacts/labels.txt";
-
-// Name of file containing the anchor values for the network. Each line is one
-// anchor. each anchor has 2 comma separated floating point values.
-static constexpr const char * ANCHOR_FILENAME = "./yolo_v2_tiny_artifacts/anchors.csv";
-
-// Filename of the image on which to run the inference
-static constexpr const char * IMAGE_FILENAME = "./yolo_v2_tiny_artifacts/test_image_0.jpg";
-
 namespace tvm_utility
 {
 namespace yolo_v2_tiny
 {
@@ -118,16 +109,18 @@ class PreProcessorYoloV2Tiny : public tvm_utility::pipeline::PreProcessor<std::string>
 class PostProcessorYoloV2Tiny : public tvm_utility::pipeline::PostProcessor<std::vector<float>>
 {
 public:
-  explicit PostProcessorYoloV2Tiny(tvm_utility::pipeline::InferenceEngineTVMConfig config)
+  explicit PostProcessorYoloV2Tiny(
+    tvm_utility::pipeline::InferenceEngineTVMConfig config, std::string label_filename,
+    std::string anchor_filename)
   : network_output_width(config.network_outputs[0].node_shape[1]),
     network_output_height(config.network_outputs[0].node_shape[2]),
     network_output_depth(config.network_outputs[0].node_shape[3]),
    network_output_datatype_bytes(config.network_outputs[0].tvm_dtype_bits / 8)
   {
     // Parse human readable names for the classes
-    std::ifstream label_file{LABEL_FILENAME};
+    std::ifstream label_file{label_filename};
     if (!label_file.good()) {
-      std::string label_filename = LABEL_FILENAME;
       throw std::runtime_error("unable to open label file:" + label_filename);
     }
     std::string line{};
@@ -136,9 +129,9 @@ class PostProcessorYoloV2Tiny : public tvm_utility::pipeline::PostProcessor<std::vector<float>>
 
     // Parse anchor values
-    std::ifstream anchor_file{ANCHOR_FILENAME};
+    std::ifstream anchor_file{anchor_filename};
     if (!anchor_file.good()) {
-      std::string anchor_filename = ANCHOR_FILENAME;
       throw std::runtime_error("unable to open anchor file:" + anchor_filename);
     }
@@ -165,20 +158,36 @@ class PostProcessorYoloV2Tiny : public tvm_utility::pipeline::PostProcessor<std::vector<float>>
   std::vector<std::pair<float, float>> anchors{};
 };
 
-TEST(PipelineExamples, SimplePipeline)
+} // namespace yolo_v2_tiny
+} // namespace tvm_utility
+
+bool check_near(double expected, double actual, double tolerance)
+{
+  return std::fabs(expected - actual) <= tolerance;
+}
+
+int main(int argc, char * argv[])
 {
+  // init node to use parameters
+  rclcpp::init(argc, argv);
+  auto node = rclcpp::Node::make_shared("yolo_v2_tiny_example");
+  // Filename of the image on which to run the inference
+  node->declare_parameter<std::string>("image_filename");
+  // Name of file containing the human readable names of the classes. One class
+  // on each line.
+  node->declare_parameter<std::string>("label_filename");
+  // Name of file containing the anchor values for the network. Each line is one
+  // anchor; each anchor has 2 comma-separated floating point values.
+  node->declare_parameter<std::string>("anchor_filename");
+  // Packages data and artifacts directory path.
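+  // (e.g. $HOME/autoware_data; the engine resolves the model files under
+  // <data_path>/tvm_utility/models/<network_name>/)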
+  node->declare_parameter<std::string>("data_path");
+
+  RCLCPP_INFO(node->get_logger(), "Node started");
+
   // Instantiate the pipeline
-  using PrePT = PreProcessorYoloV2Tiny;
+  using PrePT = tvm_utility::yolo_v2_tiny::PreProcessorYoloV2Tiny;
   using IET = tvm_utility::pipeline::InferenceEngineTVM;
-  using PostPT = PostProcessorYoloV2Tiny;
+  using PostPT = tvm_utility::yolo_v2_tiny::PostProcessorYoloV2Tiny;
 
   PrePT PreP{config};
-  IET IE{config, "tvm_utility"};
-  PostPT PostP{config};
+  IET IE{config, "tvm_utility", node->get_parameter("data_path").as_string()};
+  PostPT PostP{
+    config, node->get_parameter("label_filename").as_string(),
+    node->get_parameter("anchor_filename").as_string()};
 
   tvm_utility::pipeline::Pipeline<PrePT, IET, PostPT> pipeline(PreP, IE, PostP);
 
-  auto version_status = IE.version_check({2, 0, 0});
-  EXPECT_NE(version_status, tvm_utility::Version::Unsupported);
-
   // Push input data through the pipeline and get the output
-  auto output = pipeline.schedule(IMAGE_FILENAME);
+  auto output = pipeline.schedule(node->get_parameter("image_filename").as_string());
 
   // Define reference vector containing expected values, expressed as hexadecimal integers
   std::vector<int32_t> int_output{0x3eb64594, 0x3f435656, 0x3ece1600, 0x3e99d381,
@@ -271,11 +287,21 @@
   }
 
   // Test: check if the generated output is equal to the reference
-  EXPECT_EQ(expected_output.size(), output.size()) << "Unexpected output size";
+  if (expected_output.size() == output.size()) {
+    RCLCPP_INFO(node->get_logger(), "Model has proper output size");
+  } else {
+    RCLCPP_ERROR(node->get_logger(), "Model has unexpected output size");
+  }
+
   for (size_t i = 0; i < output.size(); ++i) {
-    EXPECT_NEAR(expected_output[i], output[i], 0.0001) << "at index: " << i;
+    if (check_near(expected_output[i], output[i], 0.0001)) {
+      RCLCPP_INFO(node->get_logger(), "Model has proper output at index: %zu", i);
+    } else {
+      RCLCPP_ERROR(node->get_logger(), "Model has unexpected output at index: %zu", i);
+    }
   }
+
+  rclcpp::shutdown();
+  return 0;
 }
-
-} // namespace yolo_v2_tiny
-} // namespace tvm_utility
diff --git a/common/tvm_utility/include/tvm_utility/pipeline.hpp b/common/tvm_utility/include/tvm_utility/pipeline.hpp
index 8504da193214f..a053d5ee471be 100644
--- a/common/tvm_utility/include/tvm_utility/pipeline.hpp
+++ b/common/tvm_utility/include/tvm_utility/pipeline.hpp
@@ -224,12 +224,19 @@ typedef struct
 class InferenceEngineTVM : public InferenceEngine
 {
 public:
-  explicit InferenceEngineTVM(const InferenceEngineTVMConfig & config, const std::string & pkg_name)
+  explicit InferenceEngineTVM(
+    const InferenceEngineTVMConfig & config, const std::string & pkg_name,
+    const std::string & autoware_data_path = "")
   : config_(config)
   {
     // Get full network path
-    std::string network_prefix = ament_index_cpp::get_package_share_directory(pkg_name) +
-      "/models/" + config.network_name + "/";
+    std::string network_prefix;
+    if (autoware_data_path == "") {
+      network_prefix = ament_index_cpp::get_package_share_directory(pkg_name) + "/models/" +
+        config.network_name + "/";
+    } else {
+      network_prefix = autoware_data_path + "/" + pkg_name + "/models/" + config.network_name + "/";
+    }
     std::string network_module_path = network_prefix + config.network_module_path;
     std::string network_graph_path = network_prefix + config.network_graph_path;
     std::string network_params_path = network_prefix + config.network_params_path;
diff --git a/common/tvm_utility/launch/yolo_v2_tiny_example.launch.xml b/common/tvm_utility/launch/yolo_v2_tiny_example.launch.xml
new file mode 100644
index 0000000000000..045a6fc9dfa27
--- /dev/null
+++ b/common/tvm_utility/launch/yolo_v2_tiny_example.launch.xml
@@ -0,0 +1,23 @@
+<launch>
+  <arg name="image_filename" default="$(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/test_image_0.jpg" description="Filename of the image on which to run the inference"/>
+  <arg name="label_filename" default="$(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/labels.txt" description="Name of file containing the human readable names of the classes"/>
+  <arg name="anchor_filename" default="$(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/anchors.csv" description="Name of file containing the anchor values for the network"/>
+  <arg name="data_path" default="$(env HOME)/autoware_data" description="Packages data and artifacts directory path"/>
+
+  <node pkg="tvm_utility" exec="yolo_v2_tiny" name="yolo_v2_tiny_example" output="screen">
+    <param name="image_filename" value="$(var image_filename)"/>
+    <param name="label_filename" value="$(var label_filename)"/>
+    <param name="anchor_filename" value="$(var anchor_filename)"/>
+    <param name="data_path" value="$(var data_path)"/>
+  </node>
+</launch>
diff --git a/common/tvm_utility/schema/yolo_v2_tiny_example.schema.json b/common/tvm_utility/schema/yolo_v2_tiny_example.schema.json
new file mode 100644
index 0000000000000..8ee1987f73a62
--- /dev/null
+++ b/common/tvm_utility/schema/yolo_v2_tiny_example.schema.json
@@ -0,0 +1,47 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "Parameters for yolo_v2_tiny_example of tvm_utility",
+  "type": "object",
+  "definitions": {
+    "yolo_v2_tiny_example": {
+      "type": "object",
+      "properties": {
+        "image_filename": {
+          "type": "string",
+          "default": "$(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/test_image_0.jpg",
+          "description": "Filename of the image on which to run the inference."
+        },
+        "label_filename": {
+          "type": "string",
+          "default": "$(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/labels.txt",
+          "description": "Name of file containing the human readable names of the classes. One class on each line."
+        },
+        "anchor_filename": {
+          "type": "string",
+          "default": "$(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/anchors.csv",
+          "description": "Name of file containing the anchor values for the network. Each line is one anchor; each anchor has 2 comma-separated floating point values."
+        },
+        "data_path": {
+          "type": "string",
+          "default": "$(env HOME)/autoware_data",
+          "description": "Packages data and artifacts directory path."
+        }
+      },
+      "required": ["image_filename", "label_filename", "anchor_filename", "data_path"]
+    }
+  },
+  "properties": {
+    "/**": {
+      "type": "object",
+      "properties": {
+        "ros__parameters": {
+          "$ref": "#/definitions/yolo_v2_tiny_example"
+        }
+      },
+      "required": ["ros__parameters"],
+      "additionalProperties": false
+    }
+  },
+  "required": ["/**"],
+  "additionalProperties": false
+}
diff --git a/common/tvm_utility/test/abs_model/main.cpp b/common/tvm_utility/test/abs_model/main.cpp
new file mode 100644
index 0000000000000..4bf1a69c0556b
--- /dev/null
+++ b/common/tvm_utility/test/abs_model/main.cpp
@@ -0,0 +1,146 @@
+// Copyright 2021-2022 Arm Limited and Contributors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
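+//
+// Unit test: runs a tiny element-wise abs model through the full
+// PreProcessor -> InferenceEngineTVM -> PostProcessor pipeline and checks its output.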
+
+#include "gtest/gtest.h"
+#include "tvm_utility/pipeline.hpp"
+
+// file for current arch x86 or arm is chosen in cmake file
+#include <inference_engine_tvm_config.hpp>
+#include <opencv2/opencv.hpp>
+
+#include <algorithm>
+#include <cassert>
+#include <string>
+#include <utility>
+#include <vector>
+
+using model_zoo::inf_test::engine_load::abs_model::config;
+
+namespace tvm_utility
+{
+namespace abs_model
+{
+
+class PreProcessorLinearModel : public tvm_utility::pipeline::PreProcessor<std::vector<float>>
+{
+public:
+  explicit PreProcessorLinearModel(tvm_utility::pipeline::InferenceEngineTVMConfig config)
+  : network_input_a_width(config.network_inputs[0].node_shape[0]),
+    network_input_a_height(config.network_inputs[0].node_shape[1]),
+    network_input_datatype_bytes(config.network_inputs[0].tvm_dtype_bits / 8)
+  {
+    // Allocate input variable
+    std::vector<int64_t> shape_a{network_input_a_width, network_input_a_height};
+    tvm_utility::pipeline::TVMArrayContainer a{
+      shape_a,
+      config.network_inputs[0].tvm_dtype_code,
+      config.network_inputs[0].tvm_dtype_bits,
+      config.network_inputs[0].tvm_dtype_lanes,
+      config.tvm_device_type,
+      config.tvm_device_id};
+
+    output = a;
+  }
+
+  // The cv::Mat can't be used as an input because it throws an exception when
+  // passed as a constant reference
+  tvm_utility::pipeline::TVMArrayContainerVector schedule(const std::vector<float> & input)
+  {
+    float input_mat[2][2];
+    input_mat[0][0] = input[0];
+    input_mat[0][1] = input[1];
+    input_mat[1][0] = input[2];
+    input_mat[1][1] = input[3];
+
+    // Create cv::Mat from input array
+    cv::Mat a_input = cv::Mat(2, 2, CV_32F, &input_mat);
+
+    TVMArrayCopyFromBytes(
+      output.getArray(), a_input.data,
+      network_input_a_width * network_input_a_height * network_input_datatype_bytes);
+
+    return {output};
+  }
+
+private:
+  int64_t network_input_a_width;
+  int64_t network_input_a_height;
+  int64_t network_input_datatype_bytes;
+  tvm_utility::pipeline::TVMArrayContainer output;
+};
+
+class PostProcessorLinearModel : public tvm_utility::pipeline::PostProcessor<std::vector<float>>
+{
+public:
+  explicit PostProcessorLinearModel(tvm_utility::pipeline::InferenceEngineTVMConfig config)
+  : network_output_width(config.network_outputs[0].node_shape[0]),
+    network_output_height(config.network_outputs[0].node_shape[1]),
+    network_output_datatype_bytes(config.network_outputs[0].tvm_dtype_bits / 8)
+  {
+  }
+
+  std::vector<float> schedule(const tvm_utility::pipeline::TVMArrayContainerVector & input)
+  {
+    // Assert data is stored row-majored in input and the dtype is float
+    assert(input[0].getArray()->strides == nullptr);
+    assert(input[0].getArray()->dtype.bits == sizeof(float) * 8);
+
+    // Copy the inference data to CPU memory
+    std::vector<float> infer(network_output_width * network_output_height, 0.0f);
+
+    TVMArrayCopyToBytes(
+      input[0].getArray(), infer.data(),
+      network_output_width * network_output_height * network_output_datatype_bytes);
+
+    return infer;
+  }
+
+private:
+  int64_t network_output_width;
+  int64_t network_output_height;
+  int64_t network_output_datatype_bytes;
+};
+
+TEST(PipelineExamples, SimplePipeline)
+{
+  // Instantiate the pipeline
+  using PrePT = PreProcessorLinearModel;
+  using IET = tvm_utility::pipeline::InferenceEngineTVM;
+  using PostPT = PostProcessorLinearModel;
+
+  PrePT PreP{config};
+  IET IE{config, "tvm_utility"};
+  PostPT PostP{config};
+
+  tvm_utility::pipeline::Pipeline<PrePT, IET, PostPT> pipeline(PreP, IE, PostP);
+
+  auto version_status = IE.version_check({2, 0, 0});
+  EXPECT_NE(version_status, tvm_utility::Version::Unsupported);
+
+  // create input array
+  std::vector<float> input_arr{-1., -2., -3., 4.};
+  // send it to the model
+  auto output = pipeline.schedule(input_arr);
+
+  // define vector with expected values
+  std::vector<float> expected_output{1., 2., 3., 4.};
+
+  // Test: check if the generated output is equal to the reference
+  EXPECT_EQ(expected_output.size(), output.size()) << "Unexpected output size";
+  for (size_t i = 0; i < output.size(); ++i) {
+    EXPECT_NEAR(expected_output[i], output[i], 0.0001) << "at index: " << i;
+  }
+}
+
+} // namespace abs_model
+} // namespace tvm_utility
diff --git a/common/tvm_utility/tvm-utility-yolo-v2-tiny-tests.md b/common/tvm_utility/tvm-utility-yolo-v2-tiny-tests.md
index 188918a8f74df..39bcc640c2147 100644
--- a/common/tvm_utility/tvm-utility-yolo-v2-tiny-tests.md
+++ b/common/tvm_utility/tvm-utility-yolo-v2-tiny-tests.md
@@ -7,20 +7,41 @@ output.
 
 ## Compiling the Example
 
-1. Download an example image to be used as test input. this image needs to be
-   saved in the `artifacts/yolo_v2_tiny/` folder
+
-```sh
-curl https://raw.githubusercontent.com/pjreddie/darknet/master/data/dog.jpg \
-  > artifacts/yolo_v2_tiny/test_image_0.jpg
-```
+1. Check that the model was downloaded during the environment preparation step by Ansible and
+   that the model files exist in the folder $HOME/autoware_data/tvm_utility/models/yolo_v2_tiny.
 
-1. Build and test with the `DOWNLOAD_ARTIFACTS` flag set.
+   If not, you can download them manually; see [Manual Artifacts Downloading](https://github.com/autowarefoundation/autoware/tree/main/ansible/roles/artifacts).
 
-```sh
-colcon build --packages-up-to tvm_utility --cmake-args -DDOWNLOAD_ARTIFACTS=ON
-colcon test --packages-select tvm_utility
-```
+2. Download an example image to be used as test input. This image needs to be
+   saved in the `artifacts/yolo_v2_tiny/` folder.
+
+   ```sh
+   curl https://raw.githubusercontent.com/pjreddie/darknet/master/data/dog.jpg \
+     > artifacts/yolo_v2_tiny/test_image_0.jpg
+   ```
+
+3. Build.
+
+   ```sh
+   colcon build --packages-up-to tvm_utility --cmake-args -DBUILD_EXAMPLE=ON
+   ```
+
+4. Run.
+
+   ```sh
+   ros2 launch tvm_utility yolo_v2_tiny_example.launch.xml
+   ```
+
+## Parameters
+
+| Name              | Type   | Default Value                                                            | Description                                                                                                                                   |
+| ----------------- | ------ | ------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------- |
+| `image_filename`  | string | `$(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/test_image_0.jpg` | Filename of the image on which to run the inference.                                                                                          |
+| `label_filename`  | string | `$(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/labels.txt`       | Name of file containing the human readable names of the classes. One class on each line.                                                     |
+| `anchor_filename` | string | `$(find-pkg-share tvm_utility)/artifacts/yolo_v2_tiny/anchors.csv`      | Name of file containing the anchor values for the network. Each line is one anchor; each anchor has 2 comma-separated floating point values. |
+| `data_path`       | string | `$(env HOME)/autoware_data`                                              | Packages data and artifacts directory path.                                                                                                   |
 
 ## GPU backend
 
 Vulkan is supported by default by the tvm_vendor package.
 It can be selected by setting the `tvm_utility_BACKEND` variable:
 
 ```sh
-colcon build --packages-up-to tvm_utility --cmake-args -DDOWNLOAD_ARTIFACTS=ON -Dtvm_utility_BACKEND=vulkan
+colcon build --packages-up-to tvm_utility --cmake-args -Dtvm_utility_BACKEND=vulkan
 ```
diff --git a/common/tvm_utility/tvm_utility-extras.cmake b/common/tvm_utility/tvm_utility-extras.cmake
index 4214aa4995f0f..414644c1fe041 100644
--- a/common/tvm_utility/tvm_utility-extras.cmake
+++ b/common/tvm_utility/tvm_utility-extras.cmake
@@ -12,12 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Get user-provided variables
-set(DOWNLOAD_ARTIFACTS OFF CACHE BOOL "enable artifacts download")
-set(MODELZOO_VERSION "3.0.0-20221221" CACHE STRING "targeted ModelZoo version")
-
 #
-# Download the selected neural network if it is not already present on disk.
 # Make inference_engine_tvm_config.hpp available under "data/models/${MODEL_NAME}/".
 # Install the TVM artifacts to "share/${PROJECT_NAME}/models/".
 # Return the name of the custom target in the DEPENDENCY parameter.
@@ -34,48 +29,16 @@ function(get_neural_network MODEL_NAME MODEL_BACKEND DEPENDENCY)
   set(EXTERNALPROJECT_NAME ${MODEL_NAME}_${MODEL_BACKEND})
   set(PREPROCESSING "")
 
-  # Prioritize user-provided models.
-  # cspell: ignore COPYONLY
-  if(IS_DIRECTORY "${DATA_PATH}/user/${MODEL_NAME}")
-    message(STATUS "Using user-provided model from ${DATA_PATH}/user/${MODEL_NAME}")
-    file(REMOVE_RECURSE "${DATA_PATH}/models/${MODEL_NAME}/")
-    configure_file(
-      "${DATA_PATH}/user/${MODEL_NAME}/inference_engine_tvm_config.hpp"
-      "${DATA_PATH}/models/${MODEL_NAME}/inference_engine_tvm_config.hpp"
-      COPYONLY
-    )
-    if(EXISTS "${DATA_PATH}/user/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp")
-      configure_file(
-        "${DATA_PATH}/user/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp"
-        "${DATA_PATH}/models/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp"
-        COPYONLY
-      )
-    endif()
-    set(SOURCE_DIR "${DATA_PATH}/user/${MODEL_NAME}")
-    set(INSTALL_DIRECTORY "${DATA_PATH}/user/${MODEL_NAME}")
-  else()
-    set(ARCHIVE_NAME "${MODEL_NAME}-${CMAKE_SYSTEM_PROCESSOR}-${MODEL_BACKEND}-${MODELZOO_VERSION}.tar.gz")
-
-    # Use previously-downloaded archives if available.
-    set(DOWNLOAD_DIR "${DATA_PATH}/downloads")
-    if(DOWNLOAD_ARTIFACTS)
-      message(STATUS "Downloading ${ARCHIVE_NAME} ...")
-      if(NOT EXISTS "${DATA_PATH}/downloads/${ARCHIVE_NAME}")
-        set(URL "https://autoware-modelzoo.s3.us-east-2.amazonaws.com/models/${MODELZOO_VERSION}/${ARCHIVE_NAME}")
-        file(DOWNLOAD ${URL} "${DOWNLOAD_DIR}/${ARCHIVE_NAME}")
-      endif()
-    else()
-      message(WARNING "Skipped download for ${MODEL_NAME} (enable by setting DOWNLOAD_ARTIFACTS)")
-      set(${DEPENDENCY} "" PARENT_SCOPE)
-      return()
-    endif()
+  if(IS_DIRECTORY "${DATA_PATH}/models/${MODEL_NAME}")
     set(SOURCE_DIR "${DATA_PATH}/models/${MODEL_NAME}")
     set(INSTALL_DIRECTORY "${DATA_PATH}/models/${MODEL_NAME}")
-    file(ARCHIVE_EXTRACT INPUT "${DOWNLOAD_DIR}/${ARCHIVE_NAME}" DESTINATION "${SOURCE_DIR}")
     if(EXISTS "${DATA_PATH}/models/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp")
       set(PREPROCESSING "${DATA_PATH}/models/${MODEL_NAME}/preprocessing_inference_engine_tvm_config.hpp")
     endif()
-
+  else()
+    message(WARNING "No model configuration files were provided for ${MODEL_NAME}")
+    set(${DEPENDENCY} "" PARENT_SCOPE)
+    return()
   endif()
 
   include(ExternalProject)
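
For reference, composing the full pipeline outside of gtest follows the same pattern as the `abs_model` test above. The sketch below is illustrative only: it assumes the `PreProcessorLinearModel` and `PostProcessorLinearModel` classes from `test/abs_model/main.cpp` are available to the translation unit, and it reuses the tolerance from that test.

```cpp
// Minimal sketch: PreProcessor -> InferenceEngineTVM -> PostProcessor for the abs model.
#include "tvm_utility/pipeline.hpp"

#include <inference_engine_tvm_config.hpp>

#include <cmath>
#include <cstddef>
#include <vector>

using model_zoo::inf_test::engine_load::abs_model::config;

int main()
{
  using PrePT = tvm_utility::abs_model::PreProcessorLinearModel;
  using IET = tvm_utility::pipeline::InferenceEngineTVM;
  using PostPT = tvm_utility::abs_model::PostProcessorLinearModel;

  PrePT pre{config};
  IET engine{config, "tvm_utility"};  // empty data path: load from the package share directory
  PostPT post{config};

  tvm_utility::pipeline::Pipeline<PrePT, IET, PostPT> pipeline(pre, engine, post);

  const std::vector<float> input{-1.f, -2.f, -3.f, 4.f};
  const std::vector<float> expected{1.f, 2.f, 3.f, 4.f};
  const std::vector<float> output = pipeline.schedule(input);

  // Element-wise comparison within a small tolerance.
  for (std::size_t i = 0; i < output.size(); ++i) {
    if (std::fabs(expected[i] - output[i]) > 0.0001) {
      return 1;
    }
  }
  return 0;
}
```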