Improve GPU path for logsigmoid
bartekkuncer committed May 19, 2021
1 parent 41b781f commit c117e83
Showing 11 changed files with 52 additions and 15 deletions.
7 changes: 4 additions & 3 deletions benchmark/opperf/nd_operations/nn_activation_operators.py
@@ -36,9 +36,10 @@
8. Activation
8.1 relu
8.2 sigmoid
- 8.3 softrelu
- 8.4 softsign
- 8.5 tanh
+ 8.3 log_sigmoid
+ 8.4 softrelu
+ 8.5 softsign
+ 8.6 tanh
"""

2 changes: 1 addition & 1 deletion benchmark/opperf/rules/default_params.py
@@ -375,7 +375,7 @@

# For NN operators
DEFAULT_ACT_TYPE_LR = ['leaky', 'elu', 'selu', 'gelu']
- DEFAULT_ACT_TYPE_ACTIVATION = ['relu', 'sigmoid', 'softrelu', 'softsign', 'tanh']
+ DEFAULT_ACT_TYPE_ACTIVATION = ['relu', 'sigmoid', 'log_sigmoid', 'softrelu', 'softsign', 'tanh']
DEFAULT_LABEL_SOFTMAX = [(1024, 1024), (10000, 1), (10000, 100)]

DEFAULT_LABEL_SOFTMAX_LARGE_TENSOR = [(2**32, 1)]
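Adding 'log_sigmoid' to DEFAULT_ACT_TYPE_ACTIVATION means the opperf activation benchmarks now also exercise the Activation operator with this type. The sketch below only illustrates the kind of call that entry drives; the shape and context are assumptions, not values from the commit.

```python
import mxnet as mx

# Illustrative sketch: each entry of DEFAULT_ACT_TYPE_ACTIVATION, including the
# new 'log_sigmoid', ends up in an Activation call roughly like this one.
ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()
data = mx.nd.random.uniform(-1, 1, shape=(1024, 1024), ctx=ctx)
out = mx.nd.Activation(data, act_type='log_sigmoid')
out.wait_to_read()  # force execution so the kernel actually runs
```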
8 changes: 8 additions & 0 deletions python/mxnet/ndarray/ndarray.py
@@ -2171,6 +2171,14 @@ def log1p(self, *args, **kwargs):
"""
return op.log1p(self, *args, **kwargs)

+ def log_sigmoid(self, *args, **kwargs):
+     """Convenience fluent method for :py:func:`log_sigmoid`.
+     The arguments are the same as for :py:func:`log_sigmoid`, with
+     this array as data.
+     """
+     return op.log_sigmoid(self, *args, **kwargs)

def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
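A minimal usage sketch of the new NDArray fluent method; it assumes a build in which the standalone log_sigmoid operator is registered (the method simply forwards to op.log_sigmoid, as shown in the hunk above):

```python
import mxnet as mx

x = mx.nd.array([-2.0, 0.0, 2.0])
y = x.log_sigmoid()        # fluent method added in this commit
z = mx.nd.log_sigmoid(x)   # equivalent direct operator call
print(y.asnumpy(), z.asnumpy())
```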
8 changes: 8 additions & 0 deletions python/mxnet/numpy/multiarray.py
@@ -2260,6 +2260,14 @@ def log1p(self, *args, **kwargs):
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log1p')

+ def log_sigmoid(self, *args, **kwargs):
+     """Convenience fluent method for :py:func:`log_sigmoid`.
+     The arguments are the same as for :py:func:`log_sigmoid`, with
+     this array as data.
+     """
+     raise AttributeError('mxnet.numpy.ndarray object has no attribute log_sigmoid')

def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
16 changes: 8 additions & 8 deletions python/mxnet/symbol/symbol.py
@@ -2519,6 +2519,14 @@ def log1p(self, *args, **kwargs):
"""
return op.log1p(self, *args, **kwargs)

+ def log_sigmoid(self, *args, **kwargs):
+     """Convenience fluent method for :py:func:`log_sigmoid`.
+     The arguments are the same as for :py:func:`log_sigmoid`, with
+     this array as data.
+     """
+     return op.log_sigmoid(self, *args, **kwargs)

def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
@@ -2583,14 +2591,6 @@ def sigmoid(self, *args, **kwargs):
"""
return op.sigmoid(self, *args, **kwargs)

- def log_sigmoid(self, *args, **kwargs):
-     """Convenience fluent method for :py:func:`log_sigmoid`.
-     The arguments are the same as for :py:func:`log_sigmoid`, with
-     this array as data.
-     """
-     return op.log_sigmoid(self, *args, **kwargs)

def softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmax`.
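In symbol.py the same fluent method is moved so it sits after log1p, matching the placement used in ndarray.py, instead of after sigmoid; behaviour is unchanged. A hedged sketch of the symbolic usage, assuming mx.sym.log_sigmoid is registered:

```python
import mxnet as mx

data = mx.sym.Variable('data')
out = data.log_sigmoid()   # same operator as mx.sym.log_sigmoid(data)
res = out.eval(ctx=mx.cpu(), data=mx.nd.array([-1.0, 0.0, 1.0]))
print(res[0].asnumpy())
```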
2 changes: 2 additions & 0 deletions src/api/operator/numpy_extension/npx_activation_op.cc
@@ -34,6 +34,8 @@ inline int String2MXNetActType(const std::string& s) {
return activation::kReLU;
} else if (s == "sigmoid") {
return activation::kSigmoid;
+ } else if (s == "log_sigmoid") {
+   return activation::kLogSigmoid;
} else if (s == "tanh") {
return activation::kTanh;
} else if (s == "softrelu") {
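The new branch maps the act_type string "log_sigmoid" onto activation::kLogSigmoid for the numpy-extension front end. A sketch of the corresponding Python call, assuming the npx.activation API available in MXNet 2.x builds:

```python
from mxnet import np, npx

npx.set_np()
x = np.array([-2.0, 0.0, 2.0])
y = npx.activation(x, act_type='log_sigmoid')  # routed through String2MXNetActType
print(y)
```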
10 changes: 8 additions & 2 deletions src/common/cuda/rtc/backward_functions-inl.h
@@ -40,8 +40,14 @@ backward_relu(const DTypeGrad grad, const DType val) {
template <typename DType, typename DTypeGrad>
__device__ inline mixed_type<DTypeGrad, DType>
- backward_sigmoid(const DTypeGrad grad, const DType out) {
-   return grad * out * (1 - out);
+ backward_sigmoid(const DTypeGrad grad, const DType val) {
+   return grad * val * (1 - val);
}
+ template <typename DType, typename DTypeGrad>
+ __device__ inline mixed_type<DTypeGrad, DType>
+ backward_log_sigmoid(const DTypeGrad grad, const DType val) {
+   return grad * 1 / (1 + op::exp(val));
+ }
template <typename DType, typename DTypeGrad>
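The new backward kernel relies on the identity d/dx log(sigmoid(x)) = 1 - sigmoid(x) = 1 / (1 + exp(x)), which is what grad * 1 / (1 + op::exp(val)) computes. A plain-Python sanity check of that formula (illustration only, not part of the commit):

```python
import math

def log_sigmoid(x):
    # same expression as the GPU forward path: log(1 / (1 + exp(-x)))
    return math.log(1.0 / (1.0 + math.exp(-x)))

def backward_log_sigmoid(grad, x):
    # d/dx log(sigmoid(x)) = 1 - sigmoid(x) = 1 / (1 + exp(x))
    return grad * 1.0 / (1.0 + math.exp(x))

x, eps = 0.7, 1e-6
numeric = (log_sigmoid(x + eps) - log_sigmoid(x - eps)) / (2 * eps)
assert abs(numeric - backward_log_sigmoid(1.0, x)) < 1e-6
```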
9 changes: 9 additions & 0 deletions src/common/cuda/rtc/forward_functions-inl.h
@@ -685,6 +685,15 @@ __device__ inline DType sigmoid(const DType val) {
}
}
+ template <typename DType>
+ __device__ inline DType log_sigmoid(const DType val) {
+   if (type_util::has_double_or_integral<DType>::value) {
+     return ::log(1./(1 + ::exp(-val)));
+   } else {
+     return ::logf(1.f/(1 + expf(-val)));
+   }
+ }
template <typename DType>
__device__ inline DType softrelu(const DType val) {
if (type_util::has_double_or_integral<DType>::value) {
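For reference, the expression added above is the standard log-sigmoid; the two branches only select double (::log, ::exp) versus float (::logf, expf) math for the same formula:

$$\log\sigma(x) \;=\; \log\frac{1}{1+e^{-x}} \;=\; -\log\bigl(1+e^{-x}\bigr)$$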
2 changes: 2 additions & 0 deletions src/operator/fusion/fused_op-inl.h
@@ -56,6 +56,7 @@ const std::map<std::string, std::vector<std::vector<std::string>>> ops_desc = {
{"_backward_amp_cast" , {{"op::identity(%)", "_0"}}},
{"relu" , {{"op::relu(%)", "_0"}}},
{"sigmoid" , {{"op::sigmoid(%)", "_0"}}},
+ {"log_sigmoid" , {{"op::log_sigmoid(%)", "_0"}}},
{"softsign" , {{"op::softsign(%)", "_0"}}},
{"exp" , {{"op::exp(%)", "_0"}}},
{"expm1" , {{"op::expm1(%)", "_0"}}},
@@ -135,6 +136,7 @@ const std::map<std::string, std::vector<std::vector<std::string>>> ops_desc = {
{"logical_not" , {{"op::logical_not(%)", "_0"}}},
{"_backward_relu" , {{"op::backward_relu(%, %)", "_0", "_1"}}},
{"_backward_sigmoid" , {{"op::backward_sigmoid(%, %)", "_0", "_1"}}},
+ {"_backward_log_sigmoid" , {{"op::backward_log_sigmoid(%, %)", "_0", "_1"}}},
{"_backward_expm1" , {{"op::backward_expm1(%, %)", "_0", "_1"}}},
{"_backward_log" , {{"op::backward_log(%, %)", "_0", "_1"}}},
{"_backward_log10" , {{"op::backward_log10(%, %)", "_0", "_1"}}},
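Each ops_desc entry pairs a code template containing '%' placeholders with the argument names substituted in when the fused kernel source is generated; the new rows let the pointwise fuser emit op::log_sigmoid and op::backward_log_sigmoid calls. A toy illustration of that substitution (not the actual MXNet codegen):

```python
def expand(template, args):
    # Replace each '%' placeholder with the next argument name, in order.
    for a in args:
        template = template.replace('%', a, 1)
    return template

print(expand("op::log_sigmoid(%)", ["_0"]))                    # op::log_sigmoid(_0)
print(expand("op::backward_log_sigmoid(%, %)", ["_0", "_1"]))  # op::backward_log_sigmoid(_0, _1)
```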
2 changes: 1 addition & 1 deletion src/operator/nn/activation-inl.h
@@ -47,7 +47,7 @@ namespace activation {
enum ActivationOpInputs {kData};
enum ActivationOpOutputs {kOut};
enum ActivationOpResource {kTempSpace};
- enum ActivationOpType {kReLU, kSigmoid, kTanh, kSoftReLU, kSoftSign, kLogSigmoid};
+ enum ActivationOpType {kReLU, kSigmoid, kLogSigmoid, kTanh, kSoftReLU, kSoftSign};

// Get the number of inputs to the gradient depending on the activation type
int GradNumInputs(int act_type);
1 change: 1 addition & 0 deletions tests/cpp/operator/activation_perf.cc
@@ -43,6 +43,7 @@ TEST(ACTIVATION_PERF, ExecuteBidirectional) {
vector<string> activations = {
"relu",
"sigmoid",
+ "log_sigmoid",
"tanh",
"softrelu",
"softsign"
