Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add QDepthwiseConv2D, DepthwiseConv2D, DepthwiseConv1D support #834

Merged
merged 7 commits into from
Aug 28, 2023
Merged
21 changes: 19 additions & 2 deletions hls4ml/backends/vivado/passes/convolution_templates.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,14 @@
from hls4ml.backends.backend import get_backend
from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
from hls4ml.model.layers import Conv1D, Conv2D, Conv2DBatchnorm, DepthwiseConv2D, SeparableConv1D, SeparableConv2D
from hls4ml.model.layers import (
Conv1D,
Conv2D,
Conv2DBatchnorm,
DepthwiseConv1D,
DepthwiseConv2D,
SeparableConv1D,
SeparableConv2D,
)

# Shared multiplication template

Expand Down Expand Up @@ -52,13 +60,16 @@
const ap_uint<config{index}::filt_width> config{index}::pixels[] = {{{instructions}}};\n"""

conv1d_function_template = 'nnet::conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
depthconv1d_function_template = (
'nnet::depthwise_conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
)

conv1d_include_list = ['nnet_utils/nnet_conv1d.h', 'nnet_utils/nnet_conv1d_stream.h']


class Conv1DConfigTemplate(LayerConfigTemplate):
def __init__(self):
super().__init__(Conv1D)
super().__init__((Conv1D, DepthwiseConv1D))
self.template = conv1d_config_template
self.mult_template = conv_mult_config_template

Expand Down Expand Up @@ -106,6 +117,12 @@ def format(self, node):
return self.template.format(**params)


class DepthwiseConv1DFunctionTemplate(Conv1DFunctionTemplate):
    """Function-call template emitting the nnet::depthwise_conv_1d_* invocation."""

    def __init__(self):
        # Deliberately bypass Conv1DFunctionTemplate.__init__ (which would register
        # the Conv1D layer class) and call the grandparent initializer directly,
        # registering DepthwiseConv1D instead. Reuses sepconv1d_include_list since
        # the depthwise streaming kernels live in the sepconv headers (defined
        # elsewhere in this file — presumably nnet_sepconv1d*.h; verify there).
        super(Conv1DFunctionTemplate, self).__init__(DepthwiseConv1D, include_header=sepconv1d_include_list)
        self.template = depthconv1d_function_template


# Conv2D Templates

conv2d_config_template = """struct config{index} : nnet::conv2d_config {{
Expand Down
15 changes: 10 additions & 5 deletions hls4ml/converters/keras/convolution.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from hls4ml.converters.utils import compute_padding_1d, compute_padding_2d, parse_data_format


@keras_handler('Conv1D', 'SeparableConv1D')
@keras_handler('Conv1D', 'SeparableConv1D', 'DepthwiseConv1D')
def parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader):
assert 'Conv1D' in keras_layer['class_name']

Expand All @@ -12,14 +12,19 @@ def parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader):

if layer['class_name'] in ['Conv1D', 'QConv1D']:
layer['weight_data'] = get_weights_data(data_reader, layer['name'], 'kernel')
else: # SeparableConv1D
layer['depthwise_data'], layer['pointwise_data'], layer['bias_data'] = get_weights_data(
data_reader, layer['name'], ['depthwise_kernel', 'pointwise_kernel', 'bias']
elif layer['class_name'] in ['SeparableConv1D', 'QSeparableConv1D']:
layer['depthwise_data'], layer['pointwise_data'] = get_weights_data(
data_reader, layer['name'], ['depthwise_kernel', 'pointwise_kernel']
)
else: # DepthwiseConv1D
layer['depthwise_data'] = get_weights_data(data_reader, layer['name'], 'depthwise_kernel')

layer['bias_data'] = get_weights_data(data_reader, layer['name'], 'bias')

layer['n_filt'] = keras_layer['config']['filters']
if 'filters' in keras_layer['config']:
layer['n_filt'] = keras_layer['config']['filters']
else:
layer['n_filt'] = layer['n_chan']
layer['filt_width'] = keras_layer['config']['kernel_size'][0]
layer['stride_width'] = keras_layer['config']['strides'][0]
layer['padding'] = keras_layer['config']['padding']
Expand Down
13 changes: 13 additions & 0 deletions hls4ml/converters/keras/qkeras.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,19 @@ def parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader):
return layer, output_shape


@keras_handler('QDepthwiseConv2D')
def parse_qdepthwiseqconv_layer(keras_layer, input_names, input_shapes, data_reader):
    """Parse a QKeras QDepthwiseConv2D layer.

    Delegates geometry/weight parsing to the plain Conv2D handler, then attaches
    the QKeras quantizers for the depthwise kernel and (optionally) the bias.
    """
    layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader)

    layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise')
    # 'bias_quantizer' may be absent from the config or explicitly None; only
    # build a quantizer object when one was actually configured. Using .get()
    # avoids a KeyError on configs that omit the key entirely.
    if keras_layer['config'].get('bias_quantizer') is not None:
        layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias')
    else:
        layer['bias_quantizer'] = None

    return layer, output_shape


@keras_handler('QActivation')
def parse_qactivation_layer(keras_layer, input_names, input_shapes, data_reader):
assert keras_layer['class_name'] == 'QActivation'
Expand Down
2 changes: 1 addition & 1 deletion hls4ml/converters/keras_to_hls.py
Original file line number Diff line number Diff line change
Expand Up @@ -301,7 +301,7 @@ def parse_keras_model(model_arch, reader):
act_layer['class_name'] = 'QActivation'
act_layer['config'] = {
'name': layer['name'] + '_' + act_details['class_name'],
'activation': act_details['class_name'],
'activation': act_details,
}
act_layer, output_shape = layer_handlers['QActivation'](act_layer, None, [output_shape], reader)
else:
Expand Down
19 changes: 19 additions & 0 deletions hls4ml/model/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -472,6 +472,23 @@ def initialize(self):
self.add_bias(quantizer=self.get_attr('bias_quantizer'))


class DepthwiseConv1D(Conv1D):
    """Depthwise 1D convolution: one filter per input channel (n_filt == n_chan)."""

    def initialize(self):
        # Output tensor: for a 1D conv the only spatial dimension is the width.
        if self.get_attr('data_format') == 'channels_last':
            shape = [self.attributes['out_width'], self.attributes['n_chan']]
            # Fixed: the dim label must match the shape entry (out_width) —
            # the original said OUT_HEIGHT_, inconsistent with the
            # channels_first branch below and wrong for a 1D layer.
            dims = [f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}']
        else:
            shape = [self.attributes['n_chan'], self.attributes['out_width']]
            dims = [f'N_CHAN_{self.index}', f'OUT_WIDTH_{self.index}']
        self.add_output_variable(shape, dims)

        # Depthwise kernel is stored under the generic 'weight' name so the
        # shared conv function templates ({w}) pick it up unchanged.
        self.add_weights_variable(
            name='weight', var_name='w{index}', data='depthwise', quantizer=self.get_attr('depthwise_quantizer')
        )

        self.add_bias(quantizer=self.get_attr('bias_quantizer'))


class Conv2D(Layer):
_expected_attributes = [
Attribute('in_height'),
Expand Down Expand Up @@ -1314,8 +1331,10 @@ def initialize(self):
'QConv2D': Conv2D,
'QConv2DBatchnorm': Conv2DBatchnorm,
'SeparableConv1D': SeparableConv1D,
'DepthwiseConv1D': DepthwiseConv1D,
'SeparableConv2D': SeparableConv2D,
'DepthwiseConv2D': DepthwiseConv2D,
'QDepthwiseConv2D': DepthwiseConv2D,
'BatchNormalization': BatchNormalization,
'QBatchNormalization': BatchNormalization,
'MaxPooling1D': Pooling1D,
Expand Down
10 changes: 10 additions & 0 deletions hls4ml/templates/vitis/nnet_utils/nnet_sepconv1d_stream.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,16 @@ void depthwise_conv_1d_buffer_cl(hls::stream<data_T> &data, hls::stream<res_T> &
}
}

// Streaming depthwise 1D convolution entry point for Vitis HLS.
// Vitis supports only the line-buffer streaming implementation, so after a
// configuration assert this forwards unconditionally to the buffer variant.
template <class data_T, class res_T, typename CONFIG_T>
void depthwise_conv_1d_cl(hls::stream<data_T> &data, hls::stream<res_T> &res,
                          typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan],
                          typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) {
    // Guard against configs requesting the 'encoded' implementation, which
    // exists only in the Vivado backend headers.
    assert(CONFIG_T::implementation == conv_implementation::linebuffer &&
           "Only \"linebuffer\" implementation is supported in Vitis HLS.");
    #pragma HLS inline recursive
    depthwise_conv_1d_buffer_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
}

template <class data_T, class res_T, typename CONFIG_T>
void pointwise_conv_1d_cl(hls::stream<data_T> &data, hls::stream<res_T> &res,
typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt],
Expand Down
11 changes: 11 additions & 0 deletions hls4ml/templates/vitis/nnet_utils/nnet_sepconv2d_stream.h
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,17 @@ void pointwise_conv_2d_cl(hls::stream<data_T> &data, hls::stream<res_T> &res,
}
}

// Streaming depthwise 2D convolution entry point for Vitis HLS.
// Vitis supports only the line-buffer streaming implementation, so after a
// configuration assert this forwards unconditionally to the buffer variant.
template <class data_T, class res_T, typename CONFIG_T>
void depthwise_conv_2d_cl(
    hls::stream<data_T> &data, hls::stream<res_T> &res,
    typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],
    typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) {
    // Guard against configs requesting the 'encoded' implementation, which
    // exists only in the Vivado backend headers.
    assert(CONFIG_T::implementation == conv_implementation::linebuffer &&
           "Only \"linebuffer\" implementation is supported in Vitis HLS.");
    #pragma HLS inline recursive
    depthwise_conv_2d_buffer_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
}

template <class data_T, class res_T, typename CONFIG_T>
void separable_conv_2d_cl(hls::stream<data_T> &data, hls::stream<res_T> &res,
typename CONFIG_T::depthwise_config::weight_t
Expand Down
15 changes: 15 additions & 0 deletions hls4ml/templates/vivado/nnet_utils/nnet_sepconv1d_stream.h
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,21 @@ void depthwise_conv_1d_buffer_cl(hls::stream<data_T> &data, hls::stream<res_T> &
}
}

template <class data_T, class res_T, typename CONFIG_T>
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For the sake of code brevity - this function can be directly called, instead of the switch statement on lines 112 - 122

Copy link
Contributor Author

@jmitrevs jmitrevs Jul 19, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You are right. The reason is of course historic, since the function on 112 -122 existed before. But I think I'll make the suggested change.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should check if using it this way it get inlined or not, otherwise it costs a cycle.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Anyway, I pushed the change. I expect that it would cause no difference in the produced code.

// Streaming depthwise 1D convolution (Vivado): dispatch on the configured
// implementation. CONFIG_T::implementation is a compile-time constant, so the
// unused branch is presumably elided during HLS synthesis.
// (Template header <class data_T, class res_T, typename CONFIG_T> is above.)
void depthwise_conv_1d_cl(hls::stream<data_T> &data, hls::stream<res_T> &res,
                          typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan],
                          typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) {
    #pragma HLS inline recursive
    switch (CONFIG_T::implementation) {
    case conv_implementation::linebuffer:
        // Line-buffer variant: buffers rows of the input stream.
        depthwise_conv_1d_buffer_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
        break;
    case conv_implementation::encoded:
        // Encoded variant: uses the precomputed pixel-instruction table.
        depthwise_conv_1d_encoded_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
        break;
    }
}

template <class data_T, class res_T, typename CONFIG_T>
void pointwise_conv_1d_cl(hls::stream<data_T> &data, hls::stream<res_T> &res,
typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt],
Expand Down
16 changes: 16 additions & 0 deletions hls4ml/templates/vivado/nnet_utils/nnet_sepconv2d_stream.h
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,22 @@ void depthwise_conv_2d_buffer_cl(
}
}

template <class data_T, class res_T, typename CONFIG_T>
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same comment for using this function below, like in the 1D case.

// Streaming depthwise 2D convolution (Vivado): dispatch on the configured
// implementation. CONFIG_T::implementation is a compile-time constant, so the
// unused branch is presumably elided during HLS synthesis.
// (Template header <class data_T, class res_T, typename CONFIG_T> is above.)
void depthwise_conv_2d_cl(
    hls::stream<data_T> &data, hls::stream<res_T> &res,
    typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],
    typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) {
    #pragma HLS inline recursive
    switch (CONFIG_T::implementation) {
    case conv_implementation::linebuffer:
        // Line-buffer variant: buffers rows of the input stream.
        depthwise_conv_2d_buffer_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
        break;
    case conv_implementation::encoded:
        // Encoded variant: uses the precomputed pixel-instruction table.
        depthwise_conv_2d_encoded_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
        break;
    }
}

template <class data_T, class res_T, typename CONFIG_T>
void pointwise_conv_2d_cl(hls::stream<data_T> &data, hls::stream<res_T> &res,
typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt],
Expand Down
54 changes: 54 additions & 0 deletions test/pytest/test_keras_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@
Conv1D,
Conv2D,
Dense,
DepthwiseConv1D,
DepthwiseConv2D,
LeakyReLU,
MaxPooling1D,
MaxPooling2D,
Expand Down Expand Up @@ -297,6 +299,58 @@ def test_conv2d(chans, padds, backend, io_type):
assert list(hls_model.get_layers())[1].attributes['pad_right'] == 0


# Currently only Vivado and Vitis are supported for io_stream.
@pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
@pytest.mark.parametrize('io_type', ['io_stream'])
def test_depthwise2d(backend, io_type):
    '''
    Test proper handling of DepthwiseConv2D.
    '''
    X = np.random.rand(10, 32, 32, 3)
    X = np.round(X * 2**10) * 2**-10  # make it an exact ap_fixed<16,6>
    model = tf.keras.models.Sequential()
    model.add(DepthwiseConv2D(kernel_size=(3, 3), input_shape=(32, 32, 3)))
    model.compile()

    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    output_dir = str(test_root_path / f'hls4mlprj_keras_api_depthwiseconv2d_{backend}_{io_type}')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
    )
    hls_model.compile()

    # Renamed from y_qkeras: this is a plain Keras model, not QKeras.
    y_keras = model.predict(X)
    y_hls4ml = hls_model.predict(X)

    np.testing.assert_allclose(y_keras, y_hls4ml.reshape(y_keras.shape), rtol=1e-2, atol=0.01)


# Currently only Vivado and Vitis are supported for io_stream.
@pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
@pytest.mark.parametrize('io_type', ['io_stream'])
def test_depthwise1d(backend, io_type):
    '''
    Test proper handling of DepthwiseConv1D.
    '''
    # Fixed docstring: it previously said "QConv2DBatchnorm" (copy-paste error).
    X = np.random.rand(10, 32, 3)
    X = np.round(X * 2**10) * 2**-10  # make it an exact ap_fixed<16,6>
    model = tf.keras.models.Sequential()
    model.add(DepthwiseConv1D(kernel_size=3, input_shape=(32, 3)))
    model.compile()

    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    output_dir = str(test_root_path / f'hls4mlprj_keras_api_depthwiseconv1d_{backend}_{io_type}')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
    )
    hls_model.compile()

    # Renamed from y_qkeras: this is a plain Keras model, not QKeras.
    y_keras = model.predict(X)
    y_hls4ml = hls_model.predict(X)

    np.testing.assert_allclose(y_keras, y_hls4ml.reshape(y_keras.shape), rtol=1e-2, atol=0.01)


pooling_layers = [MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D]


Expand Down
43 changes: 43 additions & 0 deletions test/pytest/test_qkeras.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import numpy as np
import pytest
from qkeras.qconv2d_batchnorm import QConv2DBatchnorm
from qkeras.qconvolutional import QDepthwiseConv2D
from qkeras.qlayers import QActivation, QDense
from qkeras.quantizers import (
binary,
Expand Down Expand Up @@ -400,6 +401,48 @@ def test_qconv2dbn(randX_100_8_8_1, backend, io_type):
np.testing.assert_array_equal(y_qkeras, y_hls4ml.reshape(y_qkeras.shape))


@pytest.fixture(scope='module')
def randX_10_32_32_3():
    # Shared random input batch: 10 channels-last images of shape 32x32x3,
    # generated once per test module.
    return np.random.rand(10, 32, 32, 3)


# Currently only Vivado and Vitis are supported for io_stream.
# Note: qkeras only provides a 2D version of the depthwise layer.
@pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
@pytest.mark.parametrize('io_type', ['io_stream'])
def test_qdepthwiseconv2d(randX_10_32_32_3, backend, io_type):
    '''
    Test proper handling of QDepthwiseConv2D.
    '''
    # Fixed docstring: it previously said "QConv2DBatchnorm" (copy-paste error).
    X = randX_10_32_32_3
    X = np.round(X * 2**10) * 2**-10  # make it an exact ap_fixed<16,6>
    model = Sequential()
    model.add(
        QDepthwiseConv2D(
            kernel_size=(3, 3),
            input_shape=(32, 32, 3),
            depthwise_quantizer='quantized_bits(6, 0, alpha=1)',
            # TODO(review): 'ones'/'zeros' initializers make the kernel and bias
            # trivial, so the quantizers are barely exercised — switch to
            # nontrivial initializers (e.g. the he_normal default) to make this
            # test meaningful.
            depthwise_initializer='ones',
            bias_quantizer='quantized_bits(4, 0, alpha=1)',
            bias_initializer='zeros',
            activation='quantized_relu(3, 0)',
        )
    )
    model.compile()

    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
    output_dir = str(test_root_path / f'hls4mlprj_qkeras_qdepthwiseconv2d_{backend}_{io_type}')
    hls_model = hls4ml.converters.convert_from_keras_model(
        model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
    )
    hls_model.compile()

    y_qkeras = model.predict(X)
    y_hls4ml = hls_model.predict(X)

    np.testing.assert_allclose(y_qkeras, y_hls4ml.reshape(y_qkeras.shape), rtol=1e-2, atol=0.01)


@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
@pytest.mark.parametrize('strategy', ['Latency', 'Resource'])
Expand Down
Loading