From e8e4fcde0f0d7fa11771c2deabb8cc27c0b99451 Mon Sep 17 00:00:00 2001
From: waytrue17 <52505574+waytrue17@users.noreply.github.com>
Date: Tue, 19 Jan 2021 10:58:36 -0800
Subject: [PATCH] [v1.x] ONNX support adaptiveAveragePooling2D and update
 Softmax to support temperature (#19736)

* convert adaptiveAveragePooling2D

* fix line too long

* add name to last node

* support temperature for softmax

* handle fp16 in create_tensor

* fallback to nbformat 5.0.8

* revert nbformat

* test softmax with default temp

Co-authored-by: Wei Chu
---
 .../contrib/onnx/mx2onnx/_op_translations.py | 37 ++++++++++++++++---
 tests/python-pytest/onnx/test_operators.py   | 21 +++++++++--
 2 files changed, 49 insertions(+), 9 deletions(-)

diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
index 1c50abe7dc90..1a5d3ed1eb79 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
+++ b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -186,6 +186,8 @@ def create_tensor(shape_list, shape_name, initializer, dtype='int64'):
     data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[shape_np.dtype]
     dims = np.shape(shape_np)
     tensor_node = onnx.helper.make_tensor_value_info(shape_name, data_type, dims)
+    if dtype == np.float16:
+        shape_list = shape_np.view(dtype=np.uint16).flatten().tolist()
     initializer.append(
         onnx.helper.make_tensor(
             name=shape_name,
@@ -859,15 +861,21 @@ def convert_softmax(node, **kwargs):
     name, input_nodes, attrs = get_inputs(node, kwargs)
 
     axis = int(attrs.get("axis", -1))
-    temperature = attrs.get("temperature", None)
-    if temperature and float(temperature) != 1.0:
-        raise NotImplementedError("Temperature is not supported for now.")
-    use_length = attrs.get("use_length", None)
+    temperature = str(attrs.get("temperature", 'None'))
+    if temperature == 'None':
+        temperature = 1.
+    else:
+        temperature = float(temperature)
+
+    use_length = str(attrs.get("use_length", 'None'))
     input_type = kwargs["in_type"]
+    dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type]
     data = input_nodes[0]
 
     nodes = [
-        make_node("Exp", [data], [name+"_exp_out"]),
+        create_tensor([temperature], name+"_tmp", kwargs["initializer"], dtype=dtype),
+        make_node("Div", [data, name+"_tmp"], [name+'_data']),
+        make_node("Exp", [name+'_data'], [name+"_exp_out"]),
         make_node("ReduceSum", [name+"_exp_out"], [name+"_rsum_out"], axes=[axis], keepdims=1)
     ]
     if len(input_nodes) == 1:
@@ -3023,3 +3031,22 @@ def convert_contrib_box_decode(node, **kwargs):
     ]
 
     return nodes
+
+@mx_op.register("_contrib_AdaptiveAvgPooling2D")
+def convert_contrib_AdaptiveAvgPooling2D(node, **kwargs):
+    """Map MXNet's _contrib_AdaptiveAvgPooling2D operator attributes to onnx's operator.
+ """ + from onnx.helper import make_node + name, input_nodes, attrs = get_inputs(node, kwargs) + + output_size = attrs.get('output_size', '1') + output_size = convert_string_to_list(output_size) + + if len(output_size) <= 2: + if output_size[0] != 1 or (len(output_size) == 2 and output_size[1] != 1): + raise NotImplementedError("_contrib_AdaptiveAvgPooling2D operator with output_size != 1 \ + not yet implemented.") + nodes = [ + make_node("GlobalAveragePool", [input_nodes[0]], [name], name=name) + ] + return nodes diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py index 4882321e39a6..66c0454c8d0a 100644 --- a/tests/python-pytest/onnx/test_operators.py +++ b/tests/python-pytest/onnx/test_operators.py @@ -338,17 +338,18 @@ def test_onnx_export_cast(tmp_path, src_dtype, dst_dtype, shape): @pytest.mark.parametrize('dtype', ['float16', 'float32']) -def test_onnx_export_softmax(tmp_path, dtype): +@pytest.mark.parametrize('temperature', [.1, 1., 10.]) +def test_onnx_export_softmax(tmp_path, dtype, temperature): x = mx.nd.random.uniform(0, 1, (2, 3, 4), dtype=dtype) M1 = def_model('softmax') op_export_test('softmax_1', M1, [x], tmp_path) - M2 = def_model('softmax', use_length=True, axis=0) + M2 = def_model('softmax', use_length=True, axis=0, temperature=temperature) l2 = mx.nd.array([[2,0,2,1],[1,1,2,1], [0,0,0,1]], dtype=int) op_export_test('softmax_2', M2, [x, l2], tmp_path) - M3 = def_model('softmax', use_length=True, axis=-1) + M3 = def_model('softmax', use_length=True, axis=-1, temperature=temperature) l3 = mx.nd.array([[2,0,4],[0,0,0]], dtype=int) op_export_test('softmax_3', M3, [x, l3], tmp_path) - M4 = def_model('softmax', use_length=True, axis=1) + M4 = def_model('softmax', use_length=True, axis=1, temperature=temperature) l4 = mx.nd.array([[2,0,3,1],[0,1,0,0]], dtype=int) op_export_test('softmax_4', M4, [x, l4], tmp_path) @@ -421,3 +422,15 @@ def test_onnx_export_contrib_box_decode(tmp_path, dtype, fmt, clip): op_export_test('contrib_box_decode', M1, [data, anchors], tmp_path) M2 = def_model('contrib.box_decode', format=fmt, clip=clip, std0=0.3, std1=1.4, std2=0.5, std3=1.6) op_export_test('contrib_box_decode', M1, [data, anchors], tmp_path) + +@pytest.mark.parametrize('dtype', ['float16', 'float32']) +def test_onnx_export_contrib_AdaptiveAvgPooling2D(tmp_path, dtype): + x = mx.nd.random.uniform(0, 1, (1, 2, 3, 4), dtype=dtype) + M1 = def_model('contrib.AdaptiveAvgPooling2D') + op_export_test('contrib_AdaptiveAvgPooling2D', M1, [x], tmp_path) + M2 = def_model('contrib.AdaptiveAvgPooling2D', output_size=1) + op_export_test('contrib_AdaptiveAvgPooling2D', M2, [x], tmp_path) + M3 = def_model('contrib.AdaptiveAvgPooling2D', output_size=[1]) + op_export_test('contrib_AdaptiveAvgPooling2D', M3, [x], tmp_path) + M4 = def_model('contrib.AdaptiveAvgPooling2D', output_size=[1,1]) + op_export_test('contrib_AdaptiveAvgPooling2D', M4, [x], tmp_path)