
Update2023 #12


Open
wants to merge 24 commits into master
Binary file modified .DS_Store
Binary file not shown.
64 changes: 0 additions & 64 deletions .github/workflows/tfaddons_github.yml

This file was deleted.

31 changes: 16 additions & 15 deletions .github/workflows/tfaddons_stable.yml
@@ -12,15 +12,9 @@ jobs:
fail-fast: false
matrix:
config:
- { os: windows-latest, tf: '2.0.0', tensorflow-addons: '0.10.0'}
- { os: macOS-latest, tf: '2.0.0', tensorflow-addons: '0.10.0'}
- { os: ubuntu-16.04, cran: "https://demo.rstudiopm.com/all/__linux__/xenial/latest", tf: '2.0.0', tensorflow-addons: '0.10.0'}
- { os: windows-latest, tf: '2.1.0', tensorflow-addons: '0.10.0'}
- { os: macOS-latest, tf: '2.1.0', tensorflow-addons: '0.10.0'}
- { os: ubuntu-16.04, cran: "https://demo.rstudiopm.com/all/__linux__/xenial/latest", tf: '2.1.0', tensorflow-addons: '0.10.0'}
- { os: windows-latest, tf: '2.2.0', tensorflow-addons: '0.10.0'}
- { os: macOS-latest, tf: '2.2.0', tensorflow-addons: '0.10.0'}
- { os: ubuntu-16.04, cran: "https://demo.rstudiopm.com/all/__linux__/xenial/latest", tf: '2.2.0', tensorflow-addons: '0.10.0'}
- { os: windows-latest, tf: '2.13.0', tensorflow-addons: '0.21.0'}
- { os: macos-11, tf: '2.13.0', tensorflow-addons: '0.21.0'}
- { os: ubuntu-latest, tf: '2.13.0', tensorflow-addons: '0.21.0'}

env:
R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
@@ -31,12 +25,18 @@ jobs:
CRAN: ${{ matrix.config.cran }}

steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v2

- uses: r-lib/actions/setup-r@master
- uses: r-lib/actions/setup-r@v2
with:
r-version: '4.1.0'
- uses: r-lib/actions/setup-pandoc@master
r-version: '4.3.0'
- uses: r-lib/actions/setup-pandoc@v2

- name: Install dependencies Linux
if: runner.os == 'Linux'
run: |
sudo apt-get update && sudo apt-get upgrade
sudo apt-get install libcurl4-openssl-dev

- name: Install deps
run: |
@@ -48,13 +48,14 @@ jobs:
run: |
Rscript -e "install.packages('reticulate')"
Rscript -e "try(reticulate::install_miniconda())"
Rscript -e "reticulate::conda_create('r-reticulate', packages = 'python==3.6.10')"
Rscript -e "reticulate::conda_create('r-reticulate', packages = 'python==3.8')"
- name: Install TensorFlow
run: |
Rscript -e "remotes::install_local()"
Rscript -e "tensorflow::install_tensorflow(version = Sys.getenv('TF_VERSION'))"
Rscript -e "reticulate::py_install(c('setuptools', 'wheel', 'requests', 'tqdm'), pip = TRUE)"
Rscript -e "reticulate::py_install('tqdm', pip = TRUE)"
Rscript -e "tfaddons::install_tfaddons()"

- name: Check
continue-on-error: ${{ matrix.config.allow_failure }}

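To reproduce the updated CI environment locally, a rough sketch in R (assumptions: miniconda is available to reticulate and the package itself is already installed; versions mirror the matrix above):

# Conda env used by the workflow, now on Python 3.8
reticulate::install_miniconda()    # skip if conda is already present
reticulate::conda_create('r-reticulate', packages = 'python==3.8')
# TensorFlow 2.13.0 plus the matching addons build
tensorflow::install_tensorflow(version = '2.13.0')
tfaddons::install_tfaddons()       # defaults to tensorflow-addons 0.21.0 (see R/install.R below)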
4 changes: 2 additions & 2 deletions DESCRIPTION
@@ -1,7 +1,7 @@
Package: tfaddons
Type: Package
Title: Interface to 'TensorFlow SIG Addons'
Version: 0.10.3
Version: 0.10.4
Authors@R: c(
person("Turgut", "Abdullayev", role = c("aut", "cre"),
email = "turqut.a.314@gmail.com")
@@ -21,7 +21,7 @@ BugReports: https://github.com/henry090/tfaddons/issues
SystemRequirements: TensorFlow >= 2.0 (https://www.tensorflow.org/)
Encoding: UTF-8
LazyData: true
RoxygenNote: 7.1.2
RoxygenNote: 7.2.3
Imports:
reticulate,
tensorflow,
27 changes: 6 additions & 21 deletions R/image_ops.R
@@ -234,36 +234,21 @@ img_dense_image_warp <- function(image,
#' @description Equalize image(s)
#'
#'
#' @param image A tensor of shape (num_images, num_rows, num_columns, num_channels) (NHWC),
#' or (num_images, num_channels, num_rows, num_columns) (NCHW), or
#' (num_rows, num_columns, num_channels) (HWC), or (num_channels, num_rows, num_columns) (CHW),
#' or (num_rows, num_columns) (HW). The rank must be statically known (the shape is
#' not TensorShape(None)).
#' @param data_format Either 'channels_first' or 'channels_last'
#' @param name The name of the op. Returns: Image(s) with the same type and
#' shape as `images`, equalized.
#'
#' @examples
#'
#' \dontrun{
#' img_equalize(img)
#' }
#' @param image image
#' @param bins The number of bins in the histogram.
#' @param name The name of the op. Returns: Image(s) with the same type and shape as `images`, equalized.
#'
#' @return Image(s) with the same type and shape as `images`, equalized.
#'
#' @export
img_equalize <- function(image,
data_format = "channels_last",
name = NULL) {
img_equalize <- function(image, bins = 256, name = NULL) {

args <- list(
tfa$image$equalize(
image = image,
data_format = data_format,
bins = as.integer(bins),
name = name
)

do.call(tfa$image$equalize, args)

}


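A minimal usage sketch of the revised signature (the input tensor `img` below is illustrative and not part of this PR; it assumes tensorflow and tfaddons are installed):

library(tensorflow)
library(tfaddons)
# Illustrative uint8 RGB image in HWC layout, values in [0, 255]
img <- tf$cast(tf$random$uniform(shape(64L, 64L, 3L), maxval = 256L, dtype = tf$int32), tf$uint8)
# data_format is gone; bins sets the histogram resolution (default 256)
out <- img_equalize(img, bins = 256)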
2 changes: 1 addition & 1 deletion R/install.R
@@ -17,7 +17,7 @@ install_tfaddons <- function(version = NULL, ..., restart_session = TRUE) {
}

if (is.null(version))
module_string <- paste0("tensorflow-addons==", '0.16.1')
module_string <- paste0("tensorflow-addons==", '0.21.0')
else
module_string <- paste0("tensorflow-addons==", version)

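In practice the bumped default means the following (a small sketch, not part of the diff):

# No version supplied: pins tensorflow-addons 0.21.0, matching the TF 2.13.0 CI matrix
tfaddons::install_tfaddons()
# An explicit version still overrides the default
tfaddons::install_tfaddons(version = '0.20.0')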
1 change: 0 additions & 1 deletion R/losses.R
@@ -242,7 +242,6 @@ attr(loss_pinball, "py_function_name") <- "pinball"

#' @title Sigmoid focal crossentropy loss
#'
#'
#' @param name (Optional) name for the loss.
#' @param alpha balancing factor.
#' @param gamma modulating factor.
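A usage sketch for the documented parameters (hedged: the wrapper name loss_sigmoid_focal_crossentropy() and its constructor-style call are assumed from the package's naming scheme, not shown in this hunk; `model` is any Keras model defined elsewhere):

library(keras)
library(tfaddons)
# alpha balances positive vs. negative examples; gamma down-weights easy ones
fl <- loss_sigmoid_focal_crossentropy(alpha = 0.25, gamma = 2.0)
model %>% compile(optimizer = 'adam', loss = fl)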
2 changes: 1 addition & 1 deletion R/metrics.R
@@ -396,7 +396,7 @@ metric_multilabel_confusion_matrix <- function(num_classes,

#' @title RSquare
#'
#' This is also called as coefficient of determination. It tells how close
#' @description This is also called as coefficient of determination. It tells how close
#' are data to the fitted regression line. Highest score can be 1.0 and it
#' indicates that the predictors perfectly accounts for variation in the target.
#' Score 0.0 indicates that the predictors do not account for variation in the
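For reference, the quantity this metric tracks, as a plain R sketch (not the addon implementation, which accumulates over batches):

rsq <- function(y_true, y_pred) {
  ss_res <- sum((y_true - y_pred)^2)          # residual sum of squares
  ss_tot <- sum((y_true - mean(y_true))^2)    # total sum of squares
  1 - ss_res / ss_tot
}
rsq(c(1, 2, 3, 4), c(1.1, 1.9, 3.2, 3.9))     # close to 1 for a good fit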
33 changes: 13 additions & 20 deletions R/optimizers.R
@@ -53,38 +53,31 @@ optimizer_lazy_adam <- function(learning_rate = 0.001, beta_1 = 0.9, beta_2 = 0.
attr(optimizer_lazy_adam, "py_function_name") <- "lazy_adam"

#' @title Conditional Gradient
#'
#' @param ord Order of the norm. Supported values are 'fro' and 'nuclear'. Default is 'fro', which is frobenius norm.
#' @param learning_rate A Tensor or a floating point value, or a schedule that is a tf$keras$optimizers$schedules$LearningRateSchedule The learning rate.
#' @param lambda_ A Tensor or a floating point value. The constraint.
#' @param epsilon A Tensor or a floating point value. A small constant for numerical stability when handling the case of norm of gradient to be zero.
#' @param use_locking If True, use locks for update operations.
#' @param name Optional name prefix for the operations created when applying gradients. Defaults to 'ConditionalGradient'.
#' @param clipnorm is clip gradients by norm.
#' @param clipvalue is clip gradients by value.
#' @param decay is included for backward compatibility to allow time inverse decay of learning rate.
#' @param lr is included for backward compatibility, recommended to use learning_rate instead.
#' @param ... keyword arguments. Allowed to be {clipnorm, clipvalue, lr, decay}. clipnorm is clip gradients by norm; clipvalue is clip gradients by value, decay is included for backward compatibility to allow time inverse decay of learning rate. lr is included for backward compatibility, recommended to use learning_rate instead.
#' @return Optimizer for use with `keras::compile()`
#' @export
optimizer_conditional_gradient <- function(learning_rate, lambda_, epsilon = 1e-07, use_locking = FALSE,
name = 'ConditionalGradient',
clipnorm = NULL, clipvalue = NULL,
decay = NULL, lr = NULL) {
optimizer_conditional_gradient <- function(
learning_rate,
lambda_ = 0.01,
epsilon = 1e-07,
ord = 'fro',
name = 'ConditionalGradient',
...) {

args <- list(
learning_rate = learning_rate,
lambda_ = lambda_,
epsilon = epsilon,
use_locking = use_locking,
ord = ord,
name = name,
clipnorm = clipnorm,
clipvalue = clipvalue,
decay = decay,
lr = lr
...
)
args$clipnorm <- clipnorm
args$clipvalue <- clipvalue
args$decay <- decay
args$lr <- lr


do.call(tfa$optimizers$ConditionalGradient, args)
}
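A short usage sketch of the new signature (`model` is any Keras model defined elsewhere, not part of this PR):

library(keras)
library(tfaddons)
opt <- optimizer_conditional_gradient(learning_rate = 0.01, lambda_ = 0.01, ord = 'fro')
model %>% compile(optimizer = opt, loss = 'mse')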
@@ -276,7 +269,7 @@ optimizer_radam <- function(learning_rate = 0.001,
# float for total_steps is here to be able to load models created before
# https://github.com/tensorflow/addons/pull/1375 was merged. It should be
# removed for Addons 0.11.
total_steps = total_steps,
total_steps = as.integer(total_steps),
warmup_proportion = warmup_proportion,
min_lr = min_lr,
name = name,
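Since total_steps is now coerced with as.integer(), plain numerics are accepted; a small sketch (parameter names taken from the surrounding context, other arguments keep their defaults):

opt <- optimizer_radam(learning_rate = 1e-3, total_steps = 10000,
                       warmup_proportion = 0.1, min_lr = 1e-5)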
70 changes: 20 additions & 50 deletions R/optimizers_.R
@@ -71,20 +71,14 @@ lookahead_mechanism <- function(optimizer,
#'
#' @param optimizer str or tf$keras$optimizers$Optimizer that will be used to compute
#' and apply gradients.
#' @param sequential_update Bool. If False, will compute the moving average at the same
#' time as the model is updated, potentially doing benign data races. If True, will update
#' the moving average after gradient updates.
#' @param average_decay float. Decay to use to maintain the moving averages of trained variables.
#' @param num_updates Optional count of the number of updates applied to variables.
#' @param name Optional name for the operations created when applying gradients.
#' Defaults to "MovingAverage".
#'
#' @param clipnorm is clip gradients by norm.
#' @param clipvalue is clip gradients by value.
#' @param decay is included for backward compatibility to allow time inverse decay of learning rate.
#' @param lr is included for backward compatibility, recommended to use learning_rate instead.
#'
#'
#' @param dynamic_decay bool. Whether to change the decay based on the number of optimizer updates. Decay will start at 0.1 and gradually increase up to average_decay after each optimizer update.
#' @param start_step int. What step to start the moving average.
#' @param ... keyword arguments. Allowed to be {clipnorm, clipvalue, lr, decay}. clipnorm is clip gradients by norm; clipvalue is clip gradients by value, decay is included for backward compatibility to allow time inverse decay of learning rate. lr is included for backward compatibility, recommended to use learning_rate instead.
#' @examples
#'
#' \dontrun{
@@ -97,31 +91,29 @@ lookahead_mechanism <- function(optimizer,
#' @return Optimizer for use with `keras::compile()`
#' @export
optimizer_moving_average <- function(optimizer,
sequential_update = TRUE,
average_decay = 0.99,
num_updates = NULL,
name = 'MovingAverage',
clipnorm = NULL, clipvalue = NULL,
decay = NULL, lr = NULL) {
average_decay = 0.99,
num_updates = NULL,
start_step = 0,
dynamic_decay = FALSE,
name = 'MovingAverage',
...) {

args = list(
optimizer = optimizer,
sequential_update = sequential_update,
average_decay = average_decay,
num_updates = num_updates,
start_step = as.integer(start_step),
dynamic_decay = dynamic_decay,
name = name,

clipnorm = clipnorm,
clipvalue = clipvalue,
decay = decay,
lr = lr
...

)

args$clipnorm <- clipnorm
args$clipvalue <- clipvalue
args$decay <- decay
args$lr <- lr
if(is.null(num_updates)) {
args$num_updates <- NULL
} else {
args$num_updates <- as.integer(args$num_updates)
}

do.call(tfa$optimizers$MovingAverage, args)
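A usage sketch with the new start_step/dynamic_decay arguments (hedged: the wrapped optimizer here is plain Adam from keras; `model` is defined elsewhere):

library(keras)
library(tfaddons)
base_opt <- optimizer_adam(learning_rate = 1e-3)
opt <- optimizer_moving_average(base_opt, average_decay = 0.99,
                                start_step = 0, dynamic_decay = TRUE)
model %>% compile(optimizer = opt, loss = 'mse')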

@@ -152,16 +144,7 @@ optimizer_moving_average,
#' @param average_period An integer. The synchronization period of SWA. The averaging occurs every
#' average_period steps. Averaging period needs to be >= 1.
#' @param name Optional name for the operations created when applying gradients. Defaults to 'SWA'.
#' @param sequential_update Bool. If FALSE, will compute the moving average at the same time as the
#' model is updated, potentially doing benign data races. If True, will update the moving average
#' after gradient updates
#'
#'
#' @param clipnorm is clip gradients by norm.
#' @param clipvalue is clip gradients by value.
#' @param decay is included for backward compatibility to allow time inverse decay of learning rate.
#' @param lr is included for backward compatibility, recommended to use learning_rate instead.
#'
#' @param ... keyword arguments. Allowed to be {clipnorm, clipvalue, lr, decay}. clipnorm is clip gradients by norm; clipvalue is clip gradients by value, decay is included for backward compatibility to allow time inverse decay of learning rate. lr is included for backward compatibility, recommended to use learning_rate instead.
#'
#' @examples
#'
@@ -176,29 +159,16 @@ optimizer_swa <- function(optimizer,
start_averaging = 0,
average_period = 10,
name = 'SWA',
sequential_update=TRUE,
clipnorm = NULL, clipvalue = NULL,
decay = NULL, lr = NULL) {
...) {

args = list(
optimizer = optimizer,
start_averaging = as.integer(start_averaging),
average_period = as.integer(average_period),
name = name,
sequential_update = sequential_update,

clipnorm = clipnorm,
clipvalue = clipvalue,
decay = decay,
lr = lr

...
)

args$clipnorm <- clipnorm
args$clipvalue <- clipvalue
args$decay <- decay
args$lr <- lr

do.call(tfa$optimizers$SWA, args)

}
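A usage sketch of the trimmed signature (extra keyword arguments such as clipnorm now pass through `...`; `model` is defined elsewhere):

library(keras)
library(tfaddons)
opt <- optimizer_swa(optimizer_sgd(0.01), start_averaging = 0, average_period = 10)
model %>% compile(optimizer = opt, loss = 'mse')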
1 change: 0 additions & 1 deletion R/rnn.R
@@ -1,6 +1,5 @@
#' @title LSTM cell with layer normalization and recurrent dropout.
#'
#'
#' @details This class adds layer normalization and recurrent dropout to a LSTM unit. Layer
#' normalization implementation is based on: https://arxiv.org/abs/1607.06450.
#' "Layer Normalization" Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton and is
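A rough usage sketch for this cell (hedged: the exported R wrapper's name is not visible in this hunk, so this goes through the underlying Python module via reticulate):

library(keras)
tfa_py <- reticulate::import('tensorflow_addons')
# LSTM cell with layer normalization and recurrent dropout
cell <- tfa_py$rnn$LayerNormLSTMCell(units = 64L, recurrent_dropout = 0.1)
layer <- layer_rnn(cell = cell)   # wrap the cell in a standard Keras RNN layer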