diff --git a/.DS_Store b/.DS_Store
index 816491b..3e5662f 100644
Binary files a/.DS_Store and b/.DS_Store differ
diff --git a/.github/workflows/tfaddons_github.yml b/.github/workflows/tfaddons_github.yml
deleted file mode 100644
index bef8829..0000000
--- a/.github/workflows/tfaddons_github.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-on: [pull_request]
-
-name: TFA
-
-jobs:
-  R-CMD:
-    runs-on: ${{ matrix.config.os }}
-
-    name: ${{ matrix.config.os }} (TF ${{ matrix.config.tf }}) (from Github)
-
-    strategy:
-      fail-fast: false
-      matrix:
-        config:
-          - { os: windows-latest, tf: '2.0.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: macOS-latest, tf: '2.0.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: ubuntu-16.04, cran: "https://demo.rstudiopm.com/all/__linux__/xenial/latest", tf: '2.0.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: windows-latest, tf: '2.1.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: macOS-latest, tf: '2.1.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: ubuntu-16.04, cran: "https://demo.rstudiopm.com/all/__linux__/xenial/latest", tf: '2.1.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: windows-latest, tf: '2.2.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: macOS-latest, tf: '2.2.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: ubuntu-16.04, cran: "https://demo.rstudiopm.com/all/__linux__/xenial/latest", tf: '2.2.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: windows-latest, tf: '2.3.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: macOS-latest, tf: '2.3.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-          - { os: ubuntu-16.04, cran: "https://demo.rstudiopm.com/all/__linux__/xenial/latest", tf: '2.3.0', tensorflow-addons: 'git+git://github.com/tensorflow/addons@master'}
-
-    env:
-      R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
-      TF_VERSION: ${{ matrix.config.tf }}
-      TENSORFLOW_ADDONS_VERSION: ${{ matrix.config.tensorflow-addons }}
-      PIP_NO_WARN_SCRIPT_LOCATION: false
-      RETICULATE_AUTOCONFIGURE: 'FALSE'
-      CRAN: ${{ matrix.config.cran }}
-
-    steps:
-      - uses: actions/checkout@v1
-
-      - uses: r-lib/actions/setup-r@master
-
-      - uses: r-lib/actions/setup-pandoc@master
-
-      - name: Install deps
-        run: |
-          Rscript -e "install.packages('remotes')"
-          Rscript -e "remotes::install_deps(dependencies = TRUE)"
-          Rscript -e "remotes::install_cran('rcmdcheck')"
-
-      - name: Install Python
-        run: |
-          Rscript -e "install.packages('reticulate')"
-          Rscript -e "try(reticulate::install_miniconda())"
-          Rscript -e "reticulate::conda_create('r-reticulate', packages = 'python==3.6.10')"
-      - name: Install TensorFlow
-        run: |
-          Rscript -e "remotes::install_local()"
-          Rscript -e "tensorflow::install_tensorflow(version = Sys.getenv('TF_VERSION'))"
-          Rscript -e "reticulate::py_install(c('setuptools', 'wheel', 'requests', 'tqdm'), pip = TRUE)"
-      - name: Check
-        continue-on-error: ${{ matrix.config.allow_failure }}
-
-
-
-        run: Rscript -e "rcmdcheck::rcmdcheck(args = '--no-manual', error_on = 'warning', check_dir = 'check')"
diff --git a/.github/workflows/tfaddons_stable.yml b/.github/workflows/tfaddons_stable.yml
index f82e0ef..39088ba 100644
--- a/.github/workflows/tfaddons_stable.yml
+++ b/.github/workflows/tfaddons_stable.yml
@@ -12,15 +12,9 @@ jobs:
       fail-fast: false
       matrix:
         config:
-          - { os: windows-latest, tf: '2.0.0', tensorflow-addons: '0.10.0'}
-          - { os: macOS-latest, tf: '2.0.0', tensorflow-addons: '0.10.0'}
-          - { os: ubuntu-16.04, cran: "https://demo.rstudiopm.com/all/__linux__/xenial/latest", tf: '2.0.0', tensorflow-addons: '0.10.0'}
-          - { os: windows-latest, tf: '2.1.0', tensorflow-addons: '0.10.0'}
-          - { os: macOS-latest, tf: '2.1.0', tensorflow-addons: '0.10.0'}
-          - { os: ubuntu-16.04, cran: "https://demo.rstudiopm.com/all/__linux__/xenial/latest", tf: '2.1.0', tensorflow-addons: '0.10.0'}
-          - { os: windows-latest, tf: '2.2.0', tensorflow-addons: '0.10.0'}
-          - { os: macOS-latest, tf: '2.2.0', tensorflow-addons: '0.10.0'}
-          - { os: ubuntu-16.04, cran: "https://demo.rstudiopm.com/all/__linux__/xenial/latest", tf: '2.2.0', tensorflow-addons: '0.10.0'}
+          - { os: windows-latest, tf: '2.13.0', tensorflow-addons: '0.21.0'}
+          - { os: macos-11, tf: '2.13.0', tensorflow-addons: '0.21.0'}
+          - { os: ubuntu-latest, tf: '2.13.0', tensorflow-addons: '0.21.0'}

     env:
       R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
@@ -31,12 +25,18 @@ jobs:
       CRAN: ${{ matrix.config.cran }}

     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v2

-      - uses: r-lib/actions/setup-r@master
+      - uses: r-lib/actions/setup-r@v2
         with:
-          r-version: '4.1.0'
-      - uses: r-lib/actions/setup-pandoc@master
+          r-version: '4.3.0'
+      - uses: r-lib/actions/setup-pandoc@v2
+
+      - name: Install dependencies Linux
+        if: runner.os == 'Linux'
+        run: |
+          sudo apt-get update && sudo apt-get upgrade -y
+          sudo apt-get install -y libcurl4-openssl-dev

       - name: Install deps
         run: |
@@ -48,13 +48,14 @@ jobs:
         run: |
           Rscript -e "install.packages('reticulate')"
           Rscript -e "try(reticulate::install_miniconda())"
-          Rscript -e "reticulate::conda_create('r-reticulate', packages = 'python==3.6.10')"
+          Rscript -e "reticulate::conda_create('r-reticulate', packages = 'python==3.8')"
       - name: Install TensorFlow
         run: |
           Rscript -e "remotes::install_local()"
           Rscript -e "tensorflow::install_tensorflow(version = Sys.getenv('TF_VERSION'))"
-          Rscript -e "reticulate::py_install(c('setuptools', 'wheel', 'requests', 'tqdm'), pip = TRUE)"
+          Rscript -e "reticulate::py_install('tqdm', pip = TRUE)"
           Rscript -e "tfaddons::install_tfaddons()"
+
       - name: Check
         continue-on-error: ${{ matrix.config.allow_failure }}
diff --git a/DESCRIPTION b/DESCRIPTION
index babf8a2..2330547 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,7 +1,7 @@
 Package: tfaddons
 Type: Package
 Title: Interface to 'TensorFlow SIG Addons'
-Version: 0.10.3
+Version: 0.10.4
 Authors@R: c(
     person("Turgut", "Abdullayev", role = c("aut", "cre"), email = "turqut.a.314@gmail.com")
     )
@@ -21,7 +21,7 @@ BugReports: https://github.com/henry090/tfaddons/issues
 SystemRequirements: TensorFlow >= 2.0 (https://www.tensorflow.org/)
 Encoding: UTF-8
 LazyData: true
-RoxygenNote: 7.1.2
+RoxygenNote: 7.2.3
 Imports:
     reticulate,
     tensorflow,
diff --git a/R/image_ops.R b/R/image_ops.R
index 0027d1a..7f5a057 100644
--- a/R/image_ops.R
+++ b/R/image_ops.R
@@ -234,36 +234,21 @@ img_dense_image_warp <- function(image,
 #' @description Equalize image(s)
 #'
 #'
-#' @param image A tensor of shape (num_images, num_rows, num_columns, num_channels) (NHWC),
-#' or (num_images, num_channels, num_rows, num_columns) (NCHW), or
-#' (num_rows, num_columns, num_channels) (HWC), or (num_channels, num_rows, num_columns) (CHW),
-#' or (num_rows, num_columns) (HW). The rank must be statically known (the shape is
-#' not TensorShape(None)).
-#' @param data_format Either 'channels_first' or 'channels_last'
-#' @param name The name of the op. Returns: Image(s) with the same type and
-#' shape as `images`, equalized.
-#'
-#' @examples
-#'
-#' \dontrun{
-#' img_equalize(img)
-#' }
+#' @param image A tensor of shape (num_images, num_rows, num_columns, num_channels) (NHWC), or (num_rows, num_columns, num_channels) (HWC), or (num_rows, num_columns) (HW).
+#' @param bins The number of bins in the histogram.
+#' @param name The name of the op. Returns: Image(s) with the same type and shape as `images`, equalized.
 #'
 #' @return Image(s) with the same type and shape as `images`, equalized.
 #'
 #' @export
-img_equalize <- function(image,
-                         data_format = "channels_last",
-                         name = NULL) {
+img_equalize <- function(image, bins = 256, name = NULL) {

-  args <- list(
+  tfa$image$equalize(
     image = image,
-    data_format = data_format,
+    bins = as.integer(bins),
     name = name
   )

-  do.call(tfa$image$equalize, args)
-
 }
diff --git a/R/install.R b/R/install.R
index f811e04..5220583 100644
--- a/R/install.R
+++ b/R/install.R
@@ -17,7 +17,7 @@ install_tfaddons <- function(version = NULL, ..., restart_session = TRUE) {
   }

   if (is.null(version))
-    module_string <- paste0("tensorflow-addons==", '0.16.1')
+    module_string <- paste0("tensorflow-addons==", '0.21.0')
   else
     module_string <- paste0("tensorflow-addons==", version)

diff --git a/R/losses.R b/R/losses.R
index 748636c..0dbd251 100644
--- a/R/losses.R
+++ b/R/losses.R
@@ -242,7 +242,6 @@ attr(loss_pinball, "py_function_name") <- "pinball"

 #' @title Sigmoid focal crossentropy loss
 #'
-#'
 #' @param name (Optional) name for the loss.
 #' @param alpha balancing factor.
 #' @param gamma modulating factor.
diff --git a/R/metrics.R b/R/metrics.R
index 96d79f5..ad203e5 100644
--- a/R/metrics.R
+++ b/R/metrics.R
@@ -396,7 +396,7 @@ metric_multilabel_confusion_matrix <- function(num_classes,

 #' @title RSquare
 #'
-#' This is also called as coefficient of determination. It tells how close
+#' @description This is also called the coefficient of determination. It tells how close
 #' are data to the fitted regression line. Highest score can be 1.0 and it
 #' indicates that the predictors perfectly accounts for variation in the target.
 #' Score 0.0 indicates that the predictors do not account for variation in the
diff --git a/R/optimizers.R b/R/optimizers.R
index fb53c86..a6fe2a9 100644
--- a/R/optimizers.R
+++ b/R/optimizers.R
@@ -53,38 +53,31 @@ optimizer_lazy_adam <- function(learning_rate = 0.001, beta_1 = 0.9, beta_2 = 0.
 attr(optimizer_lazy_adam, "py_function_name") <- "lazy_adam"

 #' @title Conditional Gradient
-#'
+#' @param ord Order of the norm. Supported values are 'fro' and 'nuclear'. Default is 'fro', which is the Frobenius norm.
 #' @param learning_rate A Tensor or a floating point value, or a schedule that is a tf$keras$optimizers$schedules$LearningRateSchedule The learning rate.
 #' @param lambda_ A Tensor or a floating point value. The constraint.
 #' @param epsilon A Tensor or a floating point value. A small constant for numerical stability when handling the case of norm of gradient to be zero.
-#' @param use_locking If True, use locks for update operations.
 #' @param name Optional name prefix for the operations created when applying gradients. Defaults to 'ConditionalGradient'.
-#' @param clipnorm is clip gradients by norm.
-#' @param clipvalue is clip gradients by value.
-#' @param decay is included for backward compatibility to allow time inverse decay of learning rate.
-#' @param lr is included for backward compatibility, recommended to use learning_rate instead.
+#' @param ... keyword arguments. Allowed to be {clipnorm, clipvalue, lr, decay}. clipnorm clips gradients by norm; clipvalue clips gradients by value; decay is included for backward compatibility to allow time-inverse decay of the learning rate; lr is included for backward compatibility, and it is recommended to use learning_rate instead.
 #' @return Optimizer for use with `keras::compile()`
 #' @export
-optimizer_conditional_gradient <- function(learning_rate, lambda_, epsilon = 1e-07, use_locking = FALSE,
-                                           name = 'ConditionalGradient',
-                                           clipnorm = NULL, clipvalue = NULL,
-                                           decay = NULL, lr = NULL) {
+optimizer_conditional_gradient <- function(
+    learning_rate,
+    lambda_ = 0.01,
+    epsilon = 1e-07,
+    ord = 'fro',
+    name = 'ConditionalGradient',
+    ...) {

   args <- list(
     learning_rate = learning_rate,
     lambda_ = lambda_,
     epsilon = epsilon,
-    use_locking = use_locking,
+    ord = ord,
     name = name,
-    clipnorm = clipnorm,
-    clipvalue = clipvalue,
-    decay = decay,
-    lr = lr
+    ...
   )

-  args$clipnorm <- clipnorm
-  args$clipvalue <- clipvalue
-  args$decay <- decay
-  args$lr <- lr
+
   do.call(tfa$optimizers$ConditionalGradient, args)

 }
@@ -276,7 +269,7 @@ optimizer_radam <- function(learning_rate = 0.001,
     # float for total_steps is here to be able to load models created before
     # https://github.com/tensorflow/addons/pull/1375 was merged. It should be
     # removed for Addons 0.11.
-    total_steps = total_steps,
+    total_steps = as.integer(total_steps),
     warmup_proportion = warmup_proportion,
     min_lr = min_lr,
     name = name,
diff --git a/R/optimizers_.R b/R/optimizers_.R
index 3998e34..edd1d11 100644
--- a/R/optimizers_.R
+++ b/R/optimizers_.R
@@ -71,20 +71,14 @@ lookahead_mechanism <- function(optimizer,
 #'
 #' @param optimizer str or tf$keras$optimizers$Optimizer that will be used to compute
 #' and apply gradients.
-#' @param sequential_update Bool. If False, will compute the moving average at the same
-#' time as the model is updated, potentially doing benign data races. If True, will update
-#' the moving average after gradient updates.
 #' @param average_decay float. Decay to use to maintain the moving averages of trained variables.
 #' @param num_updates Optional count of the number of updates applied to variables.
 #' @param name Optional name for the operations created when applying gradients.
 #' Defaults to "MovingAverage".
 #'
-#' @param clipnorm is clip gradients by norm.
-#' @param clipvalue is clip gradients by value.
-#' @param decay is included for backward compatibility to allow time inverse decay of learning rate.
-#' @param lr is included for backward compatibility, recommended to use learning_rate instead.
-#'
-#'
+#' @param dynamic_decay bool. Whether to change the decay based on the number of optimizer updates. Decay will start at 0.1 and gradually increase up to average_decay after each optimizer update.
+#' @param start_step int. What step to start the moving average.
+#' @param ... keyword arguments. Allowed to be {clipnorm, clipvalue, lr, decay}. clipnorm clips gradients by norm; clipvalue clips gradients by value; decay is included for backward compatibility to allow time-inverse decay of the learning rate; lr is included for backward compatibility, and it is recommended to use learning_rate instead.
 #' @examples
 #'
 #' \dontrun{
@@ -97,31 +91,29 @@ lookahead_mechanism <- function(optimizer,
 #' @return Optimizer for use with `keras::compile()`
 #' @export
 optimizer_moving_average <- function(optimizer,
-                                     sequential_update = TRUE,
-                                     average_decay = 0.99,
-                                     num_updates = NULL,
-                                     name = 'MovingAverage',
-                                     clipnorm = NULL, clipvalue = NULL,
-                                     decay = NULL, lr = NULL) {
+                                     average_decay = 0.99,
+                                     num_updates = NULL,
+                                     start_step = 0,
+                                     dynamic_decay = FALSE,
+                                     name = 'MovingAverage',
+                                     ...) {

   args = list(
     optimizer = optimizer,
-    sequential_update = sequential_update,
     average_decay = average_decay,
     num_updates = num_updates,
+    start_step = as.integer(start_step),
+    dynamic_decay = dynamic_decay,
     name = name,
-
-    clipnorm = clipnorm,
-    clipvalue = clipvalue,
-    decay = decay,
-    lr = lr
+    ...
   )

-  args$clipnorm <- clipnorm
-  args$clipvalue <- clipvalue
-  args$decay <- decay
-  args$lr <- lr
+  if(is.null(num_updates)) {
+    args$num_updates <- NULL
+  } else {
+    args$num_updates <- as.integer(args$num_updates)
+  }

   do.call(tfa$optimizers$MovingAverage, args)

@@ -152,16 +144,7 @@ optimizer_moving_average <- function(optimizer,
 #' @param average_period An integer. The synchronization period of SWA. The averaging occurs every
 #' average_period steps. Averaging period needs to be >= 1.
 #' @param name Optional name for the operations created when applying gradients. Defaults to 'SWA'.
-#' @param sequential_update Bool. If FALSE, will compute the moving average at the same time as the
-#' model is updated, potentially doing benign data races. If True, will update the moving average
-#' after gradient updates
-#'
-#'
-#' @param clipnorm is clip gradients by norm.
-#' @param clipvalue is clip gradients by value.
-#' @param decay is included for backward compatibility to allow time inverse decay of learning rate.
-#' @param lr is included for backward compatibility, recommended to use learning_rate instead.
-#'
+#' @param ... keyword arguments. Allowed to be {clipnorm, clipvalue, lr, decay}. clipnorm clips gradients by norm; clipvalue clips gradients by value; decay is included for backward compatibility to allow time-inverse decay of the learning rate; lr is included for backward compatibility, and it is recommended to use learning_rate instead.
 #'
 #' @examples
 #'
@@ -176,29 +159,16 @@ optimizer_swa <- function(optimizer,
                           start_averaging = 0,
                           average_period = 10,
                           name = 'SWA',
-                          sequential_update=TRUE,
-                          clipnorm = NULL, clipvalue = NULL,
-                          decay = NULL, lr = NULL) {
+                          ...) {

   args = list(
     optimizer = optimizer,
     start_averaging = as.integer(start_averaging),
     average_period = as.integer(average_period),
     name = name,
-    sequential_update = sequential_update,
-
-    clipnorm = clipnorm,
-    clipvalue = clipvalue,
-    decay = decay,
-    lr = lr
-
+    ...
   )

-  args$clipnorm <- clipnorm
-  args$clipvalue <- clipvalue
-  args$decay <- decay
-  args$lr <- lr
-
   do.call(tfa$optimizers$SWA, args)

 }
diff --git a/R/rnn.R b/R/rnn.R
index e5a1f0c..126f077 100644
--- a/R/rnn.R
+++ b/R/rnn.R
@@ -1,6 +1,5 @@
 #' @title LSTM cell with layer normalization and recurrent dropout.
 #'
-#'
 #' @details This class adds layer normalization and recurrent dropout to a LSTM unit. Layer
 #' normalization implementation is based on: https://arxiv.org/abs/1607.06450.
 #' "Layer Normalization" Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton and is
diff --git a/R/seq2seq.R b/R/seq2seq.R
index fcfc405..8ce112f 100644
--- a/R/seq2seq.R
+++ b/R/seq2seq.R
@@ -1,7 +1,5 @@
 #' @title Attention Wrapper
 #'
-#'
-#'
 #' @param object Model or layer object
 #' @param cell An instance of RNNCell.
 #' @param attention_mechanism A list of AttentionMechanism instances or a single instance.
@@ -348,8 +346,6 @@ decoder_basic <- function(object,

 #' @title Basic decoder output
 #'
-#'
-#'
 #' @param rnn_output the output of RNN cell
 #' @param sample_id the `id` of the sample
 #' @return None
@@ -429,8 +425,6 @@ decoder_beam_search <- function(object,

 #' @title Beam Search Decoder Output
 #'
-#'
-#'
 #' @param scores calculate the scores for each beam
 #' @param predicted_ids The final prediction. A tensor of shape
 #' `[batch_size, T, beam_width]` (or `[T, batch_size, beam_width]` if `output_time_major`
@@ -455,7 +449,6 @@ decoder_beam_search_output <- function(scores, predicted_ids, parent_ids) {

 #' @title Beam Search Decoder State
 #'
-#'
 #' @param cell_state cell_state
 #' @param log_probs log_probs
 #' @param finished finished
@@ -481,10 +474,6 @@ decoder_beam_search_state <- function(cell_state, log_probs,

 #' @title Base abstract class that allows the user to customize sampling.
 #'
-#'
-#'
-#'
-#'
 #' @param initialize_fn callable that returns (finished, next_inputs) for the first iteration.
 #' @param sample_fn callable that takes (time, outputs, state) and emits tensor sample_ids.
 #' @param next_inputs_fn callable that takes (time, outputs, state, sample_ids) and emits
@@ -623,7 +612,6 @@ decoder_final_beam_search_output <- function(predicted_ids, beam_search_decoder_

 #' @title Gather tree
 #'
-#'
 #' @param step_ids requires the step id
 #' @param parent_ids The parent ids of shape `[max_time, batch_size, beam_width]`.
 #' @param max_sequence_lengths get max_sequence_length across all beams for each batch.
@@ -733,10 +721,8 @@ hardmax <- function(logits, name = NULL) {

 #' @title Inference Sampler
 #'
-#'
 #' @details A helper to use during inference with a custom sampling function.
 #'
-#'
 #' @param sample_fn A callable that takes outputs and emits tensor sample_ids.
 #' @param sample_shape Either a list of integers, or a 1-D Tensor of type int32,
 #' the shape of the each sample in the batch returned by sample_fn.
@@ -972,13 +958,10 @@ safe_cumprod <- function(x, ...) {

 #' @title Sample Embedding Sampler
 #'
-#'
-#'
 #' @description A sampler for use during inference.
 #' @details Uses sampling (from a distribution) instead of argmax and passes
 #' the result through an embedding layer to get the next input.
 #'
-#'
 #' @param embedding_fn (Optional) A callable that takes a vector tensor of ids (argmax ids),
 #' or the params argument for embedding_lookup. The returned tensor will be passed to the
 #' decoder input.
@@ -1011,10 +994,8 @@ sampler_sample_embedding <- function(embedding_fn = NULL,

 #' @title Sampler
-#' @description Interface for implementing sampling in seq2seq decoders.
-#'
-#'
 #'
+#' @description Interface for implementing sampling in seq2seq decoders.
 #'
 #' @param ... parameter to pass batch_size, initialize, next_inputs, sample, sample_ids_dtype, sample_ids_shape
 #'
@@ -1031,10 +1012,6 @@ sampler <- function(...) {

 #' @title A training sampler that adds scheduled sampling
 #'
-#'
-#'
-#'
-#'
 #' @param sampling_probability A float32 0-D or 1-D tensor: the probability of sampling
 #' categorically from the output ids instead of reading directly from the inputs.
 #' @param embedding_fn A callable that takes a vector tensor of ids (argmax ids), or the
@@ -1162,12 +1139,8 @@ tile_batch <- function(t, multiplier, name = NULL) {

 #' @title A Sampler for use during training.
 #'
-#'
 #' @description Only reads inputs.
 #'
-#'
-#'
-#'
 #' @param time_major bool. Whether the tensors in inputs are time major.
 #' If `FALSE` (default), they are assumed to be batch major.
 #'
diff --git a/README.md b/README.md
index 56d0d07..7e66b8e 100644
--- a/README.md
+++ b/README.md
@@ -6,14 +6,10 @@ The `tfaddons` package provides R wrappers to [TensorFlow Addons](https://www.te

 __TensorFlow Addons__ is a repository of contributions that conform to well-established API patterns, but implement new functionality not available in core TensorFlow. TensorFlow natively supports a large number of operators, layers, metrics, losses, and optimizers. However, in a fast moving field like ML, there are many interesting new developments that cannot be integrated into core TensorFlow (because their broad applicability is not yet clear, or it is mostly used by a smaller subset of the community).

-[![Actions Status](https://github.com/henry090/tfaddons/workflows/TFA_stable/badge.svg)](https://github.com/henry090/tfaddons)
-[![Actions Status](https://github.com/henry090/tfaddons/workflows/TFA/badge.svg)](https://github.com/henry090/tfaddons)
+[![Actions Status](https://github.com/eagerai/tfaddons/workflows/TFA_stable/badge.svg)](https://github.com/eagerai/tfaddons)
 [![CRAN](https://www.r-pkg.org/badges/version/tfaddons?color=darkgreen)](https://cran.r-project.org/package=tfaddons)
-[![Lifecycle: experimental](https://img.shields.io/badge/lifecycle-experimental-orange.svg)](https://www.tidyverse.org/lifecycle/#experimental)
 [![Last month downloads](http://cranlogs.r-pkg.org/badges/last-month/tfaddons?color=green)](https://cran.r-project.org/package=tfaddons)
-[![Last commit](https://img.shields.io/github/last-commit/henry090/tfaddons.svg)](https://github.com/henry090/tfaddons/commits/master)
-
-![](https://img.shields.io/docker/cloud/build/turqut314/tfaddons?style=plastic)
+[![Last commit](https://img.shields.io/github/last-commit/eagerai/tfaddons.svg)](https://github.com/eagerai/tfaddons/commits/master)

 TF-addons
@@ -39,7 +35,7 @@ Requirements:

 The dev version:

 ```
-devtools::install_github('henry090/tfaddons')
+devtools::install_github('eagerai/tfaddons')
 ```

 Later, you need to install the python module *tensorflow-addons*:
diff --git a/man/img_equalize.Rd b/man/img_equalize.Rd
index 8137f3c..b49a1a4 100644
--- a/man/img_equalize.Rd
+++ b/man/img_equalize.Rd
@@ -4,19 +4,14 @@
 \alias{img_equalize}
 \title{Equalize}
 \usage{
-img_equalize(image, data_format = "channels_last", name = NULL)
+img_equalize(image, bins = 256, name = NULL)
 }
 \arguments{
-\item{image}{A tensor of shape (num_images, num_rows, num_columns, num_channels) (NHWC),
-or (num_images, num_channels, num_rows, num_columns) (NCHW), or
-(num_rows, num_columns, num_channels) (HWC), or (num_channels, num_rows, num_columns) (CHW),
-or (num_rows, num_columns) (HW). The rank must be statically known (the shape is
-not TensorShape(None)).}
+\item{image}{A tensor of shape (num_images, num_rows, num_columns, num_channels) (NHWC), or (num_rows, num_columns, num_channels) (HWC), or (num_rows, num_columns) (HW).}

-\item{data_format}{Either 'channels_first' or 'channels_last'}
+\item{bins}{The number of bins in the histogram.}

-\item{name}{The name of the op. Returns: Image(s) with the same type and
-shape as `images`, equalized.}
+\item{name}{The name of the op. Returns: Image(s) with the same type and shape as `images`, equalized.}
 }
 \value{
 Image(s) with the same type and shape as `images`, equalized.
 }
@@ -24,10 +19,3 @@ Image(s) with the same type and shape as `images`, equalized.
 \description{
 Equalize image(s)
 }
-\examples{
-
-\dontrun{
-img_equalize(img)
-}
-
-}
diff --git a/man/metric_rsquare.Rd b/man/metric_rsquare.Rd
index 4778046..098d29c 100644
--- a/man/metric_rsquare.Rd
+++ b/man/metric_rsquare.Rd
@@ -2,13 +2,7 @@
 % Please edit documentation in R/metrics.R
 \name{metric_rsquare}
 \alias{metric_rsquare}
-\title{RSquare
-
-This is also called as coefficient of determination. It tells how close
-are data to the fitted regression line. Highest score can be 1.0 and it
-indicates that the predictors perfectly accounts for variation in the target.
-Score 0.0 indicates that the predictors do not account for variation in the
-target. It can also be negative if the model is worse.}
+\title{RSquare}
 \usage{
 metric_rsquare(
   name = "r_square",
@@ -33,8 +27,6 @@ metric_rsquare(
 r squared score: float
 }
 \description{
-RSquare
-
 This is also called the coefficient of determination. It tells how close
 are data to the fitted regression line. Highest score can be 1.0 and it
diff --git a/man/optimizer_conditional_gradient.Rd b/man/optimizer_conditional_gradient.Rd
index 3d88109..80ce9ea 100644
--- a/man/optimizer_conditional_gradient.Rd
+++ b/man/optimizer_conditional_gradient.Rd
@@ -6,14 +6,11 @@
 \usage{
 optimizer_conditional_gradient(
   learning_rate,
-  lambda_,
+  lambda_ = 0.01,
   epsilon = 1e-07,
-  use_locking = FALSE,
+  ord = "fro",
   name = "ConditionalGradient",
-  clipnorm = NULL,
-  clipvalue = NULL,
-  decay = NULL,
-  lr = NULL
+  ...
 )
 }
 \arguments{
@@ -23,17 +20,11 @@ optimizer_conditional_gradient(
 \item{epsilon}{A Tensor or a floating point value. A small constant for numerical
 stability when handling the case of norm of gradient to be zero.}

-\item{use_locking}{If True, use locks for update operations.}
+\item{ord}{Order of the norm. Supported values are 'fro' and 'nuclear'. Default is 'fro', which is the Frobenius norm.}

 \item{name}{Optional name prefix for the operations created when applying
 gradients. Defaults to 'ConditionalGradient'.}

-\item{clipnorm}{is clip gradients by norm.}
-
-\item{clipvalue}{is clip gradients by value.}
-
-\item{decay}{is included for backward compatibility to allow time inverse decay of learning rate.}
-
-\item{lr}{is included for backward compatibility, recommended to use learning_rate instead.}
+\item{...}{keyword arguments. Allowed to be {clipnorm, clipvalue, lr, decay}. clipnorm clips gradients by norm; clipvalue clips gradients by value; decay is included for backward compatibility to allow time-inverse decay of the learning rate; lr is included for backward compatibility, and it is recommended to use learning_rate instead.}
 }
 \value{
 Optimizer for use with `keras::compile()`
diff --git a/man/optimizer_moving_average.Rd b/man/optimizer_moving_average.Rd
index 50c0bd8..5f43399 100644
--- a/man/optimizer_moving_average.Rd
+++ b/man/optimizer_moving_average.Rd
@@ -6,38 +6,30 @@
 \usage{
 optimizer_moving_average(
   optimizer,
-  sequential_update = TRUE,
   average_decay = 0.99,
   num_updates = NULL,
+  start_step = 0,
+  dynamic_decay = FALSE,
   name = "MovingAverage",
-  clipnorm = NULL,
-  clipvalue = NULL,
-  decay = NULL,
-  lr = NULL
+  ...
 )
 }
 \arguments{
 \item{optimizer}{str or tf$keras$optimizers$Optimizer that will be used to compute
 and apply gradients.}

-\item{sequential_update}{Bool. If False, will compute the moving average at the same
-time as the model is updated, potentially doing benign data races. If True, will update
-the moving average after gradient updates.}
-
 \item{average_decay}{float. Decay to use to maintain the moving averages of trained variables.}

 \item{num_updates}{Optional count of the number of updates applied to variables.}

-\item{name}{Optional name for the operations created when applying gradients.
-Defaults to "MovingAverage".}
-
-\item{clipnorm}{is clip gradients by norm.}
+\item{start_step}{int. What step to start the moving average.}

-\item{clipvalue}{is clip gradients by value.}
+\item{dynamic_decay}{bool. Whether to change the decay based on the number of optimizer updates. Decay will start at 0.1 and gradually increase up to average_decay after each optimizer update.}

-\item{decay}{is included for backward compatibility to allow time inverse decay of learning rate.}
+\item{name}{Optional name for the operations created when applying gradients.
+Defaults to "MovingAverage".}

-\item{lr}{is included for backward compatibility, recommended to use learning_rate instead.}
+\item{...}{keyword arguments. Allowed to be {clipnorm, clipvalue, lr, decay}. clipnorm clips gradients by norm; clipvalue clips gradients by value; decay is included for backward compatibility to allow time-inverse decay of the learning rate; lr is included for backward compatibility, and it is recommended to use learning_rate instead.}
 }
 \value{
 Optimizer for use with `keras::compile()`
diff --git a/man/optimizer_swa.Rd b/man/optimizer_swa.Rd
index ca7c6b6..8cc0d97 100644
--- a/man/optimizer_swa.Rd
+++ b/man/optimizer_swa.Rd
@@ -9,11 +9,7 @@ optimizer_swa(
   start_averaging = 0,
   average_period = 10,
   name = "SWA",
-  sequential_update = TRUE,
-  clipnorm = NULL,
-  clipvalue = NULL,
-  decay = NULL,
-  lr = NULL
+  ...
 )
 }
 \arguments{
@@ -28,17 +24,7 @@ average_period steps. Averaging period needs to be >= 1.}

 \item{name}{Optional name for the operations created when applying gradients. Defaults to 'SWA'.}

-\item{sequential_update}{Bool. If FALSE, will compute the moving average at the same time as the
-model is updated, potentially doing benign data races. If True, will update the moving average
-after gradient updates}
-
-\item{clipnorm}{is clip gradients by norm.}
-
-\item{clipvalue}{is clip gradients by value.}
-
-\item{decay}{is included for backward compatibility to allow time inverse decay of learning rate.}
-
-\item{lr}{is included for backward compatibility, recommended to use learning_rate instead.}
+\item{...}{keyword arguments. Allowed to be {clipnorm, clipvalue, lr, decay}. clipnorm clips gradients by norm; clipvalue clips gradients by value; decay is included for backward compatibility to allow time-inverse decay of the learning rate; lr is included for backward compatibility, and it is recommended to use learning_rate instead.}
 }
 \value{
 Optimizer for use with `keras::compile()`
diff --git a/tests/testthat/test-callbacks.R b/tests/testthat/test-callbacks.R
index a8d52b2..8cf735a 100644
--- a/tests/testthat/test-callbacks.R
+++ b/tests/testthat/test-callbacks.R
@@ -4,10 +4,10 @@ source("utils.R")

 test_succeeds("data is generated", {

-  x_data <- matrix(data = runif(500,0,1),nrow = 50,ncol = 5)
+  x_data <- matrix(data = runif(250,0,1),nrow = 50,ncol = 5)
   y_data <- ifelse(runif(50,0,1) > 0.6, 1L,0L) %>% as.matrix()

-  x_data2 <- matrix(data = runif(500,0,1),nrow = 50,ncol = 5)
+  x_data2 <- matrix(data = runif(250,0,1),nrow = 50,ncol = 5)
   y_data2 <- ifelse(runif(50,0,1) > 0.6, 1L,0L) %>% as.matrix()
 })
@@ -21,7 +21,7 @@ test_succeeds("callback_time_stopping", {
     epochs = 1,
     verbose=0,
     validation_data = list(x_data2,y_data2),
-    callbacks = list(tfaddons::callback_time_stopping(seconds = 1)
+    callbacks = list(tfaddons::callback_time_stopping(seconds = 10)
     ))
 })
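
Taken together, these changes track the tensorflow-addons 0.21 API: `img_equalize()` swaps `data_format` for `bins`, and the optimizer wrappers drop `sequential_update`/`use_locking` plus the explicit `clipnorm`/`clipvalue`/`decay`/`lr` arguments in favour of `...`. A minimal sketch of the updated signatures in use follows; it assumes `tfaddons` is installed alongside `keras`, `tensorflow`, and the `tensorflow-addons` Python module (via `install_tfaddons()` above), and the random image and toy model are illustrative only:

```
library(keras)
library(tensorflow)
library(tfaddons)

# img_equalize() now takes `bins` instead of `data_format`
img <- tf$cast(tf$random$uniform(shape(32L, 32L, 3L), maxval = 256L, dtype = tf$int32), tf$uint8)
eq  <- img_equalize(img, bins = 256)

# optimizer_conditional_gradient() gains `ord`; clipnorm/clipvalue/decay/lr
# would now be passed through `...`
opt_cg <- optimizer_conditional_gradient(learning_rate = 0.01, lambda_ = 0.01, ord = 'fro')

# optimizer_moving_average() replaces `sequential_update` with
# `start_step`/`dynamic_decay`
opt_ma <- optimizer_moving_average(optimizer = 'sgd', average_decay = 0.99,
                                   start_step = 0, dynamic_decay = TRUE)

# optimizer_swa() keeps only the core arguments
opt_swa <- optimizer_swa(optimizer = 'sgd', start_averaging = 0, average_period = 10)

# Any of the wrapped optimizers plugs into keras::compile() as before
model <- keras_model_sequential() %>%
  layer_dense(units = 1, activation = 'sigmoid', input_shape = 5)
model %>% compile(loss = 'binary_crossentropy', optimizer = opt_swa)
```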