Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
[v1.x] ONNX support adaptiveAveragePooling2D and update Softmax to support temperature (#19736)
Browse files Browse the repository at this point in the history

* convert adaptiveAveragePooling2D

* fix line too long

* add name to last node

* support temperature for softmax

* handle fp16 in create_tensor

* fallback to nbformat5.0.8

* revert nbformat

* test softmax with default temp

Co-authored-by: Wei Chu <weichu@amazon.com>
  • Loading branch information
waytrue17 and Wei Chu authored Jan 19, 2021
1 parent de17b5b commit e8e4fcd
Show file tree
Hide file tree
Showing 2 changed files with 49 additions and 9 deletions.
37 changes: 32 additions & 5 deletions python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
Original file line number Diff line number Diff line change
Expand Up @@ -186,6 +186,8 @@ def create_tensor(shape_list, shape_name, initializer, dtype='int64'):
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[shape_np.dtype]
dims = np.shape(shape_np)
tensor_node = onnx.helper.make_tensor_value_info(shape_name, data_type, dims)
if dtype == np.float16:
shape_list = shape_np.view(dtype=np.uint16).flatten().tolist()
initializer.append(
onnx.helper.make_tensor(
name=shape_name,
Expand Down Expand Up @@ -859,15 +861,21 @@ def convert_softmax(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)

axis = int(attrs.get("axis", -1))
temperature = attrs.get("temperature", None)
if temperature and float(temperature) != 1.0:
raise NotImplementedError("Temperature is not supported for now.")
use_length = attrs.get("use_length", None)
temperature = str(attrs.get("temperature", 'None'))
if temperature == 'None':
temperature = 1.
else:
temperature = float(temperature)

use_length = str(attrs.get("use_length", 'None'))
input_type = kwargs["in_type"]
dtype = onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type]
data = input_nodes[0]

nodes = [
make_node("Exp", [data], [name+"_exp_out"]),
create_tensor([temperature], name+"_tmp", kwargs["initializer"], dtype=dtype),
make_node("Div", [data, name+"_tmp"], [name+'_data']),
make_node("Exp", [name+'_data'], [name+"_exp_out"]),
make_node("ReduceSum", [name+"_exp_out"], [name+"_rsum_out"], axes=[axis], keepdims=1)
]
if len(input_nodes) == 1:
Expand Down Expand Up @@ -3023,3 +3031,22 @@ def convert_contrib_box_decode(node, **kwargs):
]

return nodes

@mx_op.register("_contrib_AdaptiveAvgPooling2D")
def convert_contrib_AdaptiveAvgPooling2D(node, **kwargs):
    """Map MXNet's _contrib_AdaptiveAvgPooling2D operator to ONNX's GlobalAveragePool.

    ONNX has no adaptive average pooling operator, so only the global case
    (output_size of 1, [1], or [1, 1]) can be exported; anything else raises
    NotImplementedError.
    """
    from onnx.helper import make_node
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # output_size defaults to 1 (global pooling) when the attribute is absent.
    output_size = attrs.get('output_size', '1')
    output_size = convert_string_to_list(output_size)

    # Reject any request that is not global 1x1 pooling. This also covers
    # malformed output_size lists with more than two entries, which the
    # previous check silently accepted.
    if len(output_size) > 2 or any(s != 1 for s in output_size):
        raise NotImplementedError("_contrib_AdaptiveAvgPooling2D operator with "
                                  "output_size != 1 not yet implemented.")
    nodes = [
        make_node("GlobalAveragePool", [input_nodes[0]], [name], name=name)
    ]
    return nodes
21 changes: 17 additions & 4 deletions tests/python-pytest/onnx/test_operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -338,17 +338,18 @@ def test_onnx_export_cast(tmp_path, src_dtype, dst_dtype, shape):


@pytest.mark.parametrize('dtype', ['float16', 'float32'])
def test_onnx_export_softmax(tmp_path, dtype):
@pytest.mark.parametrize('temperature', [.1, 1., 10.])
def test_onnx_export_softmax(tmp_path, dtype, temperature):
x = mx.nd.random.uniform(0, 1, (2, 3, 4), dtype=dtype)
M1 = def_model('softmax')
op_export_test('softmax_1', M1, [x], tmp_path)
M2 = def_model('softmax', use_length=True, axis=0)
M2 = def_model('softmax', use_length=True, axis=0, temperature=temperature)
l2 = mx.nd.array([[2,0,2,1],[1,1,2,1], [0,0,0,1]], dtype=int)
op_export_test('softmax_2', M2, [x, l2], tmp_path)
M3 = def_model('softmax', use_length=True, axis=-1)
M3 = def_model('softmax', use_length=True, axis=-1, temperature=temperature)
l3 = mx.nd.array([[2,0,4],[0,0,0]], dtype=int)
op_export_test('softmax_3', M3, [x, l3], tmp_path)
M4 = def_model('softmax', use_length=True, axis=1)
M4 = def_model('softmax', use_length=True, axis=1, temperature=temperature)
l4 = mx.nd.array([[2,0,3,1],[0,1,0,0]], dtype=int)
op_export_test('softmax_4', M4, [x, l4], tmp_path)

Expand Down Expand Up @@ -421,3 +422,15 @@ def test_onnx_export_contrib_box_decode(tmp_path, dtype, fmt, clip):
op_export_test('contrib_box_decode', M1, [data, anchors], tmp_path)
M2 = def_model('contrib.box_decode', format=fmt, clip=clip, std0=0.3, std1=1.4, std2=0.5, std3=1.6)
op_export_test('contrib_box_decode', M1, [data, anchors], tmp_path)

@pytest.mark.parametrize('dtype', ['float16', 'float32'])
def test_onnx_export_contrib_AdaptiveAvgPooling2D(tmp_path, dtype):
    """Export contrib.AdaptiveAvgPooling2D for every supported output_size spelling."""
    data = mx.nd.random.uniform(0, 1, (1, 2, 3, 4), dtype=dtype)
    # Each of these requests global pooling, just expressed differently;
    # all must export and round-trip successfully.
    size_variants = ({}, {'output_size': 1}, {'output_size': [1]}, {'output_size': [1, 1]})
    for extra_kwargs in size_variants:
        model = def_model('contrib.AdaptiveAvgPooling2D', **extra_kwargs)
        op_export_test('contrib_AdaptiveAvgPooling2D', model, [data], tmp_path)

0 comments on commit e8e4fcd

Please sign in to comment.